From 02fdb83e73d06983f37c967b0f2cae9b1fbd32ed Mon Sep 17 00:00:00 2001 From: Chris Marchesi Date: Sun, 5 Jun 2016 23:34:11 -0700 Subject: [PATCH] provider/acme: New provider This commit adds the ACME provider, enabling support for automated domain-validated certificate authorities such as Let's Encrypt (https://letsencrypt.org/). There are 2 resources: * acme_registration - This resource manages registrations with an ACME CA, which in the resource is a private key/email contact combination. * acme_certificate - This resource manages certificates, taking domains directly via the resource or through a pre-generated CSR. HTTP, TLS, and DNS challenges are supported, the latter through an assortment of providers. There is no explicit provider configuration for this provider, to address the chicken-and-egg relationship between registrations and certificates. Neither resource has a hard dependency on the other and acme_certificate can use a registration that is not managed by Terraform. Consult the ACME provider documentation at website/source/docs/providers/acme for full details, or at https://www.terraform.io/docs/providers/acme/index.html once the merged documentation is online. Also, as part of this commit, we flag several private key-related fields in the resources in the TLS provider as sensitive, so that they can be used within the resource should the need be there, ensuring that their field data does not get leaked in logs. --- builtin/providers/acme/acme_structure.go | 612 ++ builtin/providers/acme/acme_structure_test.go | 445 + builtin/providers/acme/provider.go | 17 + builtin/providers/acme/provider_test.go | 32 + .../acme/resource_acme_certificate.go | 169 + .../acme/resource_acme_certificate_test.go | 458 + .../acme/resource_acme_registration.go | 49 + .../acme/resource_acme_registration_test.go | 99 + .../providers/tls/resource_cert_request.go | 1 + .../tls/resource_locally_signed_cert.go | 1 + builtin/providers/tls/resource_private_key.go | 5 +- .../tls/resource_self_signed_cert.go | 1 + command/internal_plugin_list.go | 2 + vendor/github.com/JamesClonk/vultr/LICENSE | 22 + .../JamesClonk/vultr/lib/account_info.go | 59 + .../github.com/JamesClonk/vultr/lib/client.go | 224 + vendor/github.com/JamesClonk/vultr/lib/dns.go | 112 + vendor/github.com/JamesClonk/vultr/lib/ip.go | 118 + vendor/github.com/JamesClonk/vultr/lib/iso.go | 23 + vendor/github.com/JamesClonk/vultr/lib/os.go | 24 + .../github.com/JamesClonk/vultr/lib/plans.go | 35 + .../JamesClonk/vultr/lib/regions.go | 24 + .../JamesClonk/vultr/lib/scripts.go | 110 + .../JamesClonk/vultr/lib/servers.go | 386 + .../JamesClonk/vultr/lib/snapshots.go | 50 + .../JamesClonk/vultr/lib/sshkeys.go | 67 + vendor/github.com/juju/ratelimit/LICENSE | 191 + vendor/github.com/juju/ratelimit/README.md | 117 + vendor/github.com/juju/ratelimit/ratelimit.go | 245 + vendor/github.com/juju/ratelimit/reader.go | 51 + vendor/github.com/miekg/dns/AUTHORS | 1 + vendor/github.com/miekg/dns/CONTRIBUTORS | 9 + vendor/github.com/miekg/dns/COPYRIGHT | 9 + vendor/github.com/miekg/dns/LICENSE | 32 + vendor/github.com/miekg/dns/README.md | 151 + vendor/github.com/miekg/dns/client.go | 455 + vendor/github.com/miekg/dns/clientconfig.go | 99 + vendor/github.com/miekg/dns/defaults.go | 282 + vendor/github.com/miekg/dns/dns.go | 104 + vendor/github.com/miekg/dns/dnssec.go | 721 ++ vendor/github.com/miekg/dns/dnssec_keygen.go | 156 + vendor/github.com/miekg/dns/dnssec_keyscan.go | 249 + vendor/github.com/miekg/dns/dnssec_privkey.go | 85 + 
vendor/github.com/miekg/dns/doc.go | 251 + vendor/github.com/miekg/dns/edns.go | 532 + vendor/github.com/miekg/dns/format.go | 87 + vendor/github.com/miekg/dns/generate.go | 159 + vendor/github.com/miekg/dns/labels.go | 168 + vendor/github.com/miekg/dns/msg.go | 1231 +++ vendor/github.com/miekg/dns/msg_generate.go | 337 + vendor/github.com/miekg/dns/msg_helpers.go | 630 ++ vendor/github.com/miekg/dns/nsecx.go | 119 + vendor/github.com/miekg/dns/privaterr.go | 149 + vendor/github.com/miekg/dns/rawmsg.go | 49 + vendor/github.com/miekg/dns/reverse.go | 38 + vendor/github.com/miekg/dns/sanitize.go | 84 + vendor/github.com/miekg/dns/scan.go | 974 ++ vendor/github.com/miekg/dns/scan_rr.go | 2143 ++++ vendor/github.com/miekg/dns/scanner.go | 43 + vendor/github.com/miekg/dns/server.go | 732 ++ vendor/github.com/miekg/dns/sig0.go | 219 + vendor/github.com/miekg/dns/singleinflight.go | 57 + vendor/github.com/miekg/dns/tlsa.go | 86 + vendor/github.com/miekg/dns/tsig.go | 384 + vendor/github.com/miekg/dns/types.go | 1250 +++ vendor/github.com/miekg/dns/types_generate.go | 271 + vendor/github.com/miekg/dns/udp.go | 58 + vendor/github.com/miekg/dns/udp_linux.go | 73 + vendor/github.com/miekg/dns/udp_other.go | 17 + vendor/github.com/miekg/dns/udp_plan9.go | 34 + vendor/github.com/miekg/dns/udp_windows.go | 34 + vendor/github.com/miekg/dns/update.go | 106 + vendor/github.com/miekg/dns/xfr.go | 244 + vendor/github.com/miekg/dns/zmsg.go | 3462 +++++++ vendor/github.com/miekg/dns/ztypes.go | 828 ++ vendor/github.com/weppos/dnsimple-go/LICENSE | 20 + .../dnsimple-go/dnsimple/authentication.go | 73 + .../weppos/dnsimple-go/dnsimple/contacts.go | 121 + .../weppos/dnsimple-go/dnsimple/dnsimple.go | 192 + .../weppos/dnsimple-go/dnsimple/domains.go | 121 + .../dnsimple-go/dnsimple/domains_records.go | 136 + .../dnsimple-go/dnsimple/domains_zones.go | 24 + .../weppos/dnsimple-go/dnsimple/registrar.go | 131 + .../weppos/dnsimple-go/dnsimple/users.go | 35 + vendor/github.com/xenolf/lego/LICENSE | 21 + .../github.com/xenolf/lego/acme/challenges.go | 16 + vendor/github.com/xenolf/lego/acme/client.go | 798 ++ vendor/github.com/xenolf/lego/acme/crypto.go | 339 + .../xenolf/lego/acme/dns_challenge.go | 279 + .../xenolf/lego/acme/dns_challenge_manual.go | 53 + vendor/github.com/xenolf/lego/acme/error.go | 86 + vendor/github.com/xenolf/lego/acme/http.go | 120 + .../xenolf/lego/acme/http_challenge.go | 41 + .../xenolf/lego/acme/http_challenge_server.go | 79 + vendor/github.com/xenolf/lego/acme/jws.go | 109 + .../github.com/xenolf/lego/acme/messages.go | 117 + .../xenolf/lego/acme/pop_challenge.go | 1 + .../github.com/xenolf/lego/acme/provider.go | 28 + .../xenolf/lego/acme/tls_sni_challenge.go | 67 + .../lego/acme/tls_sni_challenge_server.go | 62 + vendor/github.com/xenolf/lego/acme/utils.go | 29 + .../providers/dns/cloudflare/cloudflare.go | 219 + .../dns/digitalocean/digitalocean.go | 166 + .../lego/providers/dns/dnsimple/dnsimple.go | 141 + .../xenolf/lego/providers/dns/dyn/dyn.go | 274 + .../xenolf/lego/providers/dns/gandi/gandi.go | 472 + .../providers/dns/googlecloud/googlecloud.go | 155 + .../lego/providers/dns/namecheap/namecheap.go | 416 + .../lego/providers/dns/rfc2136/rfc2136.go | 129 + .../lego/providers/dns/route53/route53.go | 170 + .../xenolf/lego/providers/dns/vultr/vultr.go | 127 + vendor/golang.org/x/crypto/ocsp/ocsp.go | 673 ++ vendor/golang.org/x/net/idna/idna.go | 68 + vendor/golang.org/x/net/idna/punycode.go | 200 + vendor/golang.org/x/net/publicsuffix/gen.go | 713 ++ 
vendor/golang.org/x/net/publicsuffix/list.go | 135 + vendor/golang.org/x/net/publicsuffix/table.go | 8947 +++++++++++++++++ .../gopkg.in/square/go-jose.v1/BUG-BOUNTY.md | 10 + .../square/go-jose.v1/CONTRIBUTING.md | 14 + vendor/gopkg.in/square/go-jose.v1/LICENSE | 202 + vendor/gopkg.in/square/go-jose.v1/README.md | 209 + .../gopkg.in/square/go-jose.v1/asymmetric.go | 498 + .../square/go-jose.v1/cipher/cbc_hmac.go | 196 + .../square/go-jose.v1/cipher/concat_kdf.go | 75 + .../square/go-jose.v1/cipher/ecdh_es.go | 51 + .../square/go-jose.v1/cipher/key_wrap.go | 109 + vendor/gopkg.in/square/go-jose.v1/crypter.go | 349 + vendor/gopkg.in/square/go-jose.v1/doc.go | 26 + vendor/gopkg.in/square/go-jose.v1/encoding.go | 191 + .../gopkg.in/square/go-jose.v1/json/LICENSE | 27 + .../gopkg.in/square/go-jose.v1/json/README.md | 13 + .../gopkg.in/square/go-jose.v1/json/decode.go | 1183 +++ .../gopkg.in/square/go-jose.v1/json/encode.go | 1197 +++ .../gopkg.in/square/go-jose.v1/json/indent.go | 141 + .../square/go-jose.v1/json/scanner.go | 623 ++ .../gopkg.in/square/go-jose.v1/json/stream.go | 480 + .../gopkg.in/square/go-jose.v1/json/tags.go | 44 + .../gopkg.in/square/go-jose.v1/json_fork.go | 31 + vendor/gopkg.in/square/go-jose.v1/json_std.go | 31 + vendor/gopkg.in/square/go-jose.v1/jwe.go | 278 + vendor/gopkg.in/square/go-jose.v1/jwk.go | 408 + vendor/gopkg.in/square/go-jose.v1/jws.go | 252 + vendor/gopkg.in/square/go-jose.v1/shared.go | 224 + vendor/gopkg.in/square/go-jose.v1/signing.go | 218 + .../gopkg.in/square/go-jose.v1/symmetric.go | 349 + vendor/gopkg.in/square/go-jose.v1/utils.go | 74 + vendor/vendor.json | 159 + website/source/assets/stylesheets/_docs.scss | 1 + .../docs/providers/acme/index.html.markdown | 88 + .../acme/r/certificate.html.markdown | 239 + .../acme/r/registration.html.markdown | 64 + website/source/layouts/acme.erb | 29 + website/source/layouts/docs.erb | 4 + 153 files changed, 45139 insertions(+), 2 deletions(-) create mode 100644 builtin/providers/acme/acme_structure.go create mode 100644 builtin/providers/acme/acme_structure_test.go create mode 100644 builtin/providers/acme/provider.go create mode 100644 builtin/providers/acme/provider_test.go create mode 100644 builtin/providers/acme/resource_acme_certificate.go create mode 100644 builtin/providers/acme/resource_acme_certificate_test.go create mode 100644 builtin/providers/acme/resource_acme_registration.go create mode 100644 builtin/providers/acme/resource_acme_registration_test.go create mode 100644 vendor/github.com/JamesClonk/vultr/LICENSE create mode 100644 vendor/github.com/JamesClonk/vultr/lib/account_info.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/client.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/dns.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/ip.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/iso.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/os.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/plans.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/regions.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/scripts.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/servers.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/snapshots.go create mode 100644 vendor/github.com/JamesClonk/vultr/lib/sshkeys.go create mode 100644 vendor/github.com/juju/ratelimit/LICENSE create mode 100644 vendor/github.com/juju/ratelimit/README.md create mode 100644 
vendor/github.com/juju/ratelimit/ratelimit.go create mode 100644 vendor/github.com/juju/ratelimit/reader.go create mode 100644 vendor/github.com/miekg/dns/AUTHORS create mode 100644 vendor/github.com/miekg/dns/CONTRIBUTORS create mode 100644 vendor/github.com/miekg/dns/COPYRIGHT create mode 100644 vendor/github.com/miekg/dns/LICENSE create mode 100644 vendor/github.com/miekg/dns/README.md create mode 100644 vendor/github.com/miekg/dns/client.go create mode 100644 vendor/github.com/miekg/dns/clientconfig.go create mode 100644 vendor/github.com/miekg/dns/defaults.go create mode 100644 vendor/github.com/miekg/dns/dns.go create mode 100644 vendor/github.com/miekg/dns/dnssec.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keyscan.go create mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go create mode 100644 vendor/github.com/miekg/dns/doc.go create mode 100644 vendor/github.com/miekg/dns/edns.go create mode 100644 vendor/github.com/miekg/dns/format.go create mode 100644 vendor/github.com/miekg/dns/generate.go create mode 100644 vendor/github.com/miekg/dns/labels.go create mode 100644 vendor/github.com/miekg/dns/msg.go create mode 100644 vendor/github.com/miekg/dns/msg_generate.go create mode 100644 vendor/github.com/miekg/dns/msg_helpers.go create mode 100644 vendor/github.com/miekg/dns/nsecx.go create mode 100644 vendor/github.com/miekg/dns/privaterr.go create mode 100644 vendor/github.com/miekg/dns/rawmsg.go create mode 100644 vendor/github.com/miekg/dns/reverse.go create mode 100644 vendor/github.com/miekg/dns/sanitize.go create mode 100644 vendor/github.com/miekg/dns/scan.go create mode 100644 vendor/github.com/miekg/dns/scan_rr.go create mode 100644 vendor/github.com/miekg/dns/scanner.go create mode 100644 vendor/github.com/miekg/dns/server.go create mode 100644 vendor/github.com/miekg/dns/sig0.go create mode 100644 vendor/github.com/miekg/dns/singleinflight.go create mode 100644 vendor/github.com/miekg/dns/tlsa.go create mode 100644 vendor/github.com/miekg/dns/tsig.go create mode 100644 vendor/github.com/miekg/dns/types.go create mode 100644 vendor/github.com/miekg/dns/types_generate.go create mode 100644 vendor/github.com/miekg/dns/udp.go create mode 100644 vendor/github.com/miekg/dns/udp_linux.go create mode 100644 vendor/github.com/miekg/dns/udp_other.go create mode 100644 vendor/github.com/miekg/dns/udp_plan9.go create mode 100644 vendor/github.com/miekg/dns/udp_windows.go create mode 100644 vendor/github.com/miekg/dns/update.go create mode 100644 vendor/github.com/miekg/dns/xfr.go create mode 100644 vendor/github.com/miekg/dns/zmsg.go create mode 100644 vendor/github.com/miekg/dns/ztypes.go create mode 100644 vendor/github.com/weppos/dnsimple-go/LICENSE create mode 100644 vendor/github.com/weppos/dnsimple-go/dnsimple/authentication.go create mode 100644 vendor/github.com/weppos/dnsimple-go/dnsimple/contacts.go create mode 100644 vendor/github.com/weppos/dnsimple-go/dnsimple/dnsimple.go create mode 100644 vendor/github.com/weppos/dnsimple-go/dnsimple/domains.go create mode 100644 vendor/github.com/weppos/dnsimple-go/dnsimple/domains_records.go create mode 100644 vendor/github.com/weppos/dnsimple-go/dnsimple/domains_zones.go create mode 100644 vendor/github.com/weppos/dnsimple-go/dnsimple/registrar.go create mode 100644 vendor/github.com/weppos/dnsimple-go/dnsimple/users.go create mode 100644 vendor/github.com/xenolf/lego/LICENSE create mode 100644 vendor/github.com/xenolf/lego/acme/challenges.go create 
mode 100644 vendor/github.com/xenolf/lego/acme/client.go create mode 100644 vendor/github.com/xenolf/lego/acme/crypto.go create mode 100644 vendor/github.com/xenolf/lego/acme/dns_challenge.go create mode 100644 vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go create mode 100644 vendor/github.com/xenolf/lego/acme/error.go create mode 100644 vendor/github.com/xenolf/lego/acme/http.go create mode 100644 vendor/github.com/xenolf/lego/acme/http_challenge.go create mode 100644 vendor/github.com/xenolf/lego/acme/http_challenge_server.go create mode 100644 vendor/github.com/xenolf/lego/acme/jws.go create mode 100644 vendor/github.com/xenolf/lego/acme/messages.go create mode 100644 vendor/github.com/xenolf/lego/acme/pop_challenge.go create mode 100644 vendor/github.com/xenolf/lego/acme/provider.go create mode 100644 vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go create mode 100644 vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go create mode 100644 vendor/github.com/xenolf/lego/acme/utils.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/route53/route53.go create mode 100644 vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go create mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go create mode 100644 vendor/golang.org/x/net/idna/idna.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go create mode 100644 vendor/golang.org/x/net/publicsuffix/list.go create mode 100644 vendor/golang.org/x/net/publicsuffix/table.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md create mode 100644 vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/square/go-jose.v1/LICENSE create mode 100644 vendor/gopkg.in/square/go-jose.v1/README.md create mode 100644 vendor/gopkg.in/square/go-jose.v1/asymmetric.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/crypter.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/doc.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/encoding.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/json/LICENSE create mode 100644 vendor/gopkg.in/square/go-jose.v1/json/README.md create mode 100644 vendor/gopkg.in/square/go-jose.v1/json/decode.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/json/encode.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/json/indent.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/json/scanner.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/json/stream.go create mode 100644 
vendor/gopkg.in/square/go-jose.v1/json/tags.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/json_fork.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/json_std.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/jwe.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/jwk.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/jws.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/shared.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/signing.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/symmetric.go create mode 100644 vendor/gopkg.in/square/go-jose.v1/utils.go create mode 100644 website/source/docs/providers/acme/index.html.markdown create mode 100644 website/source/docs/providers/acme/r/certificate.html.markdown create mode 100644 website/source/docs/providers/acme/r/registration.html.markdown create mode 100644 website/source/layouts/acme.erb diff --git a/builtin/providers/acme/acme_structure.go b/builtin/providers/acme/acme_structure.go new file mode 100644 index 000000000000..7f301065b896 --- /dev/null +++ b/builtin/providers/acme/acme_structure.go @@ -0,0 +1,612 @@ +package acme + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "log" + "os" + "sort" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "github.com/xenolf/lego/acme" + "github.com/xenolf/lego/providers/dns/cloudflare" + "github.com/xenolf/lego/providers/dns/digitalocean" + "github.com/xenolf/lego/providers/dns/dnsimple" + "github.com/xenolf/lego/providers/dns/dyn" + "github.com/xenolf/lego/providers/dns/gandi" + "github.com/xenolf/lego/providers/dns/googlecloud" + "github.com/xenolf/lego/providers/dns/namecheap" + "github.com/xenolf/lego/providers/dns/rfc2136" + "github.com/xenolf/lego/providers/dns/route53" + "github.com/xenolf/lego/providers/dns/vultr" +) + +// baseACMESchema returns a map[string]*schema.Schema with all the elements +// necessary to build the base elements of an ACME resource schema. Use this, +// along with a schema helper of a specific check type, to return the full +// schema. +func baseACMESchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "server_url": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "account_key_pem": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + }, + } +} + +// registrationSchema returns a map[string]*schema.Schema with all the elements +// that are specific to an ACME registration resource. +func registrationSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + // Even though the ACME spec does allow for multiple contact types, lego + // only works with a single email address. + "email_address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "registration_body": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "registration_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "registration_new_authz_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "registration_tos_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + } +} + +// certificateSchema returns a map[string]*schema.Schema with all the elements +// that are specific to an ACME certificate resource. +// +// The initial version of this only supports DNS challenges. 
+func certificateSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "common_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"certificate_request_pem"}, + }, + "subject_alternative_names": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + ForceNew: true, + ConflictsWith: []string{"certificate_request_pem"}, + }, + "key_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "2048", + ConflictsWith: []string{"certificate_request_pem"}, + ValidateFunc: validateKeyType, + }, + "certificate_request_pem": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"common_name", "subject_alternative_names", "key_type"}, + }, + "min_days_remaining": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 7, + ForceNew: true, + }, + "dns_challenge": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Set: dnsChallengeSetHash, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "provider": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "config": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ValidateFunc: validateDNSChallengeConfig, + }, + }, + }, + ForceNew: true, + }, + "http_challenge_port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 80, + ForceNew: true, + }, + "tls_challenge_port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 443, + ForceNew: true, + }, + "registration_url": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "certificate_domain": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "certificate_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "account_ref": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "private_key_pem": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "certificate_pem": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "issuer_pem": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + } +} + +// registrationSchemaFull returns a merged baseACMESchema + registrationSchema. +func registrationSchemaFull() map[string]*schema.Schema { + m := baseACMESchema() + for k, v := range registrationSchema() { + m[k] = v + } + return m +} + +// certificateSchemaFull returns a merged baseACMESchema + certificateSchema. +func certificateSchemaFull() map[string]*schema.Schema { + m := baseACMESchema() + for k, v := range certificateSchema() { + m[k] = v + } + return m +} + +// acmeUser implements acme.User. +type acmeUser struct { + + // The email address for the account. + Email string + + // The registration resource object. + Registration *acme.RegistrationResource + + // The private key for the account. + key crypto.PrivateKey +} + +func (u acmeUser) GetEmail() string { + return u.Email +} +func (u acmeUser) GetRegistration() *acme.RegistrationResource { + return u.Registration +} +func (u acmeUser) GetPrivateKey() crypto.PrivateKey { + return u.key +} + +// expandACMEUser creates a new instance of an ACME user from set +// email_address and private_key_pem fields, and a registration +// if one exists. 
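As an aside, acmeUser above is simply the provider's implementation of lego's acme.User interface, which the library uses to sign requests with the account key. A minimal standalone sketch of the same pattern outside Terraform, with an RSA key generated on the fly (the staging URL matches the one used in the acceptance tests below, and this is illustrative only, not part of the patch):

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/xenolf/lego/acme"
)

// user mirrors the acmeUser type above and satisfies acme.User.
type user struct {
	email        string
	registration *acme.RegistrationResource
	key          crypto.PrivateKey
}

func (u user) GetEmail() string                            { return u.email }
func (u user) GetRegistration() *acme.RegistrationResource { return u.registration }
func (u user) GetPrivateKey() crypto.PrivateKey            { return u.key }

var _ acme.User = user{} // compile-time interface check

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	u := user{email: "nobody@example.com", key: key}

	// NewClient fetches the ACME directory, so this needs network access.
	client, err := acme.NewClient("https://acme-staging.api.letsencrypt.org/directory", u, acme.KeyType("2048"))
	if err != nil {
		panic(err)
	}
	_ = client // client.Register() would follow here.
	fmt.Println("client ready for", u.GetEmail())
}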
+func expandACMEUser(d *schema.ResourceData) (*acmeUser, error) { + key, err := privateKeyFromPEM([]byte(d.Get("account_key_pem").(string))) + if err != nil { + return nil, err + } + + user := &acmeUser{ + key: key, + } + + // only set these email if it's in the schema. + if v, ok := d.GetOk("email_address"); ok { + user.Email = v.(string) + } + + return user, nil +} + +// expandACMERegistration takes a *schema.ResourceData and builds an +// *acme.RegistrationResource, complete with body, for use in lego calls. +// +// Note that this function will return nil, false if any of the computed +// registration fields are un-readable, to allow non-fatal behaviour when the +// data does not exist. +// +// TODO: This function is currently unused as registrations do not currently +// seem to be deletable in Let's Encrypt. +// Leaving it here, with an appropriate unit test, so that once this +// functionality becomes available it will be easy to grab the reg for deletion. +func expandACMERegistration(d *schema.ResourceData) (*acme.RegistrationResource, bool) { + reg := acme.RegistrationResource{} + var v interface{} + var ok bool + + if v, ok = d.GetOk("registration_body"); ok == false { + return nil, false + } + body := acme.Registration{} + err := json.Unmarshal([]byte(v.(string)), &body) + if err != nil { + log.Printf("[DEBUG] Error reading JSON for registration body: %s", err.Error()) + return nil, false + } + reg.Body = body + + if v, ok = d.GetOk("registration_url"); ok == false { + return nil, false + } + reg.URI = v.(string) + + if v, ok = d.GetOk("registration_new_authz_url"); ok == false { + return nil, false + } + reg.NewAuthzURL = v.(string) + + // TOS url can be blank, ensure we don't fail on this + if v, ok = d.GetOk("registration_tos_url"); ok == false { + log.Printf("[DEBUG] ACME reg %s: registration_tos_url is blank", reg.URI) + } + reg.TosURL = v.(string) + + return ®, true +} + +// saveACMERegistration takes an *acme.RegistrationResource and sets the appropriate fields +// for a registration resource. +func saveACMERegistration(d *schema.ResourceData, reg *acme.RegistrationResource) error { + d.SetId(reg.URI) + + body, err := json.Marshal(reg.Body) + if err != nil { + return fmt.Errorf("error reading registration body: %s", err.Error()) + } + d.Set("registration_body", string(body)) + + d.Set("registration_url", reg.URI) + d.Set("registration_new_authz_url", reg.NewAuthzURL) + d.Set("registration_tos_url", reg.TosURL) + + return nil +} + +// expandACMEClient creates a connection to an ACME server from resource data, +// and also returns the user. +// +// If regURL is supplied, the registration information is loaded in to the user's registration +// via the registration URL. +func expandACMEClient(d *schema.ResourceData, regURL string) (*acme.Client, *acmeUser, error) { + user, err := expandACMEUser(d) + if err != nil { + return nil, nil, fmt.Errorf("Error getting user data: %s", err.Error()) + } + + // Note this function is used by both the registration and certificate + // resources, but key type is not necessary during registration, so + // it's okay if it's empty for that. 
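expandACMEClient is easiest to see in the shape the unit tests below use: build a ResourceData from the merged registration schema, set the minimum fields, and expand it. A sketch along those lines (it would have to live in package acme next to the tests; testPrivateKeyText is the test fixture, not a real account key, and NewClient contacts the directory URL, so it needs network access):

func exampleExpandClient() (*acme.Client, *acmeUser, error) {
	r := &schema.Resource{Schema: registrationSchemaFull()}
	d := r.TestResourceData()
	d.Set("server_url", "https://acme-staging.api.letsencrypt.org/directory")
	d.Set("account_key_pem", testPrivateKeyText)
	d.Set("email_address", "nobody@example.com")

	// An empty registration URL skips the QueryRegistration round trip.
	return expandACMEClient(d, "")
}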
+ var keytype string + if v, ok := d.GetOk("key_type"); ok { + keytype = v.(string) + } + + client, err := acme.NewClient(d.Get("server_url").(string), user, acme.KeyType(keytype)) + if err != nil { + return nil, nil, err + } + + if regURL != "" { + user.Registration = &acme.RegistrationResource{ + URI: regURL, + } + reg, err := client.QueryRegistration() + if err != nil { + return nil, nil, err + } + user.Registration = reg + } + + return client, user, nil +} + +// expandCertificateResource takes saved state in the certificate resource +// and returns an acme.CertificateResource. +func expandCertificateResource(d *schema.ResourceData) acme.CertificateResource { + cert := acme.CertificateResource{ + Domain: d.Get("certificate_domain").(string), + CertURL: d.Get("certificate_url").(string), + AccountRef: d.Get("account_ref").(string), + PrivateKey: []byte(d.Get("private_key_pem").(string)), + Certificate: []byte(d.Get("certificate_pem").(string)), + CSR: []byte(d.Get("certificate_request_pem").(string)), + } + return cert +} + +// saveCertificateResource takes an acme.CertificateResource and sets fields. +func saveCertificateResource(d *schema.ResourceData, cert acme.CertificateResource) error { + d.SetId(cert.CertURL) + d.Set("certificate_domain", cert.Domain) + d.Set("certificate_url", cert.CertURL) + d.Set("account_ref", cert.AccountRef) + d.Set("private_key_pem", string(cert.PrivateKey)) + issued, issuer, err := splitPEMBundle(cert.Certificate) + if err != nil { + return err + } + d.Set("certificate_pem", string(issued)) + d.Set("issuer_pem", string(issuer)) + // note that CSR is skipped as it is not computed. + return nil +} + +// certDaysRemaining takes an acme.CertificateResource, parses the +// certificate, and computes the days that it has remaining. +func certDaysRemaining(cert acme.CertificateResource) (int64, error) { + x509Certs, err := parsePEMBundle(cert.Certificate) + if err != nil { + return 0, err + } + c := x509Certs[0] + + if c.IsCA { + return 0, fmt.Errorf("First certificate is a CA certificate") + } + + expiry := c.NotAfter.Unix() + now := time.Now().Unix() + + return (expiry - now) / 86400, nil +} + +// splitPEMBundle get a slice of x509 certificates from parsePEMBundle, +// and always returns 2 certificates - the issued cert first, and the issuer +// certificate second. +// +// if the certificate count in a bundle is != 2 then this function will fail. +func splitPEMBundle(bundle []byte) (cert, issuer []byte, err error) { + cb, err := parsePEMBundle(bundle) + if err != nil { + return + } + if len(cb) != 2 { + err = fmt.Errorf("Certificate bundle does not contain exactly 2 certificates") + return + } + // lego always returns the issued cert first, if the CA is first there is a problem + if cb[0].IsCA { + err = fmt.Errorf("First certificate is a CA certificate") + return + } + + cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cb[0].Raw}) + issuer = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cb[1].Raw}) + return +} + +// parsePEMBundle parses a certificate bundle from top to bottom and returns +// a slice of x509 certificates. This function will error if no certificates are found. +// +// TODO: This was taken from lego directly, consider exporting it there, or +// consolidating with other TF crypto functions. 
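To make the bundle helpers above concrete, a small illustrative sketch (not part of the patch) that takes the two-certificate PEM bundle lego returns (issued cert first, issuer second), splits it, and reports how long the issued certificate has left:

func exampleInspectBundle(bundle []byte) error {
	issued, issuer, err := splitPEMBundle(bundle)
	if err != nil {
		return err
	}

	remaining, err := certDaysRemaining(acme.CertificateResource{Certificate: issued})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] issued cert has %d days remaining; issuer chain is %d bytes of PEM", remaining, len(issuer))
	return nil
}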
+func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) { + var certificates []*x509.Certificate + var certDERBlock *pem.Block + + for { + certDERBlock, bundle = pem.Decode(bundle) + if certDERBlock == nil { + break + } + + if certDERBlock.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(certDERBlock.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } + } + + if len(certificates) == 0 { + return nil, errors.New("No certificates were found while parsing the bundle.") + } + + return certificates, nil +} + +// setDNSChallenge takes an *acme.Client and the DNS challenge complex +// structure as a map[string]interface{}, and configues the client to only +// allow a DNS challenge with the configured provider. +func setDNSChallenge(client *acme.Client, m map[string]interface{}) error { + var provider acme.ChallengeProvider + var err error + var providerName string + + if v, ok := m["provider"]; ok && v.(string) != "" { + providerName = v.(string) + } else { + return fmt.Errorf("DNS challenge provider not defined") + } + // Config only needs to be set if it's defined, otherwise existing env/SDK + // defaults are fine. + if v, ok := m["config"]; ok { + for k, v := range v.(map[string]interface{}) { + os.Setenv(k, v.(string)) + } + } + + // The below list was taken from lego's cli_handlers.go from this writing + // (June 2016). As such, this might not be a full list of supported + // providers. If a specific provider is wanted, check lego first, and then + // write the provider for lego and put in a PR. + switch providerName { + case "cloudflare": + provider, err = cloudflare.NewDNSProvider() + case "digitalocean": + provider, err = digitalocean.NewDNSProvider() + case "dnsimple": + provider, err = dnsimple.NewDNSProvider() + case "dyn": + provider, err = dyn.NewDNSProvider() + case "gandi": + provider, err = gandi.NewDNSProvider() + case "gcloud": + provider, err = googlecloud.NewDNSProvider() + case "manual": + provider, err = acme.NewDNSProviderManual() + case "namecheap": + provider, err = namecheap.NewDNSProvider() + case "route53": + provider, err = route53.NewDNSProvider() + case "rfc2136": + provider, err = rfc2136.NewDNSProvider() + case "vultr": + provider, err = vultr.NewDNSProvider() + default: + return fmt.Errorf("%s: unsupported DNS challenge provider", providerName) + } + if err != nil { + return err + } + client.SetChallengeProvider(acme.DNS01, provider) + client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSSNI01}) + + return nil +} + +// dnsChallengeSetHash computes the hash for the DNS challenge. +func dnsChallengeSetHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["provider"].(string))) + // sort the keys first so that the hash is consistent every time + keys := []string{} + for k := range m["config"].(map[string]interface{}) { + keys = append(keys, k) + } + sort.Strings(keys) + // now write out the hash values + for _, k := range keys { + buf.WriteString(fmt.Sprintf("%s-%s-", k, m["config"].(map[string]interface{})[k].(string))) + } + return hashcode.String(buf.String()) +} + +// stringSlice converts an interface slice to a string slice. +func stringSlice(src []interface{}) []string { + var dst []string + for _, v := range src { + dst = append(dst, v.(string)) + } + return dst +} + +// privateKeyFromPEM converts a PEM block into a crypto.PrivateKey. 
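The map setDNSChallenge consumes is the single flattened dns_challenge block from the certificate schema. A sketch of what that looks like by the time it reaches the client; note that the config keys are exported verbatim as environment variables, and the CLOUDFLARE_* names here are illustrative assumptions that have to match whatever the chosen lego provider actually reads:

func exampleDNSChallenge(client *acme.Client) error {
	m := map[string]interface{}{
		"provider": "cloudflare",
		"config": map[string]interface{}{
			"CLOUDFLARE_EMAIL":   "nobody@example.com",
			"CLOUDFLARE_API_KEY": "not-a-real-key",
		},
	}
	// Restricts the client to DNS-01 using the cloudflare provider.
	return setDNSChallenge(client, m)
}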
+func privateKeyFromPEM(pemData []byte) (crypto.PrivateKey, error) { + var result *pem.Block + rest := pemData + for { + result, rest = pem.Decode([]byte(rest)) + if result == nil { + return nil, fmt.Errorf("Cannot decode supplied PEM data") + } + switch result.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(result.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(result.Bytes) + } + } +} + +// csrFromPEM converts a PEM block into an *x509.CertificateRequest. +func csrFromPEM(pemData []byte) (*x509.CertificateRequest, error) { + var result *pem.Block + rest := pemData + for { + result, rest = pem.Decode([]byte(rest)) + if result == nil { + return nil, fmt.Errorf("Cannot decode supplied PEM data") + } + if result.Type == "CERTIFICATE REQUEST" { + return x509.ParseCertificateRequest(result.Bytes) + } + } +} + +// validateKeyType validates a key_type resource parameter is correct. +func validateKeyType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + found := false + for _, w := range []string{"P256", "P384", "2048", "4096", "8192"} { + if value == w { + found = true + } + } + if found == false { + errors = append(errors, fmt.Errorf( + "Certificate key type must be one of P256, P384, 2048, 4096, or 8192")) + } + return +} + +// validateDNSChallengeConfig ensures that the values supplied to the +// dns_challenge resource parameter in the acme_certificate resource +// are string values only. +func validateDNSChallengeConfig(v interface{}, k string) (ws []string, errors []error) { + value := v.(map[string]interface{}) + bad := false + for _, w := range value { + switch w.(type) { + case string: + continue + default: + bad = true + } + } + if bad == true { + errors = append(errors, fmt.Errorf( + "DNS challenge config map values must be strings only")) + } + return +} diff --git a/builtin/providers/acme/acme_structure_test.go b/builtin/providers/acme/acme_structure_test.go new file mode 100644 index 000000000000..8daf24b7c0df --- /dev/null +++ b/builtin/providers/acme/acme_structure_test.go @@ -0,0 +1,445 @@ +package acme + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/xenolf/lego/acme" +) + +const testDirResponseText = ` +{ + "new-reg": "https://example.com/acme/new-reg", + "new-authz": "https://example.com/acme/new-authz", + "new-cert": "https://example.com/acme/new-cert", + "revoke-cert": "https://example.com/acme/revoke-cert", + "meta": { + "terms-of-service": "https://example.com/acme/terms", + "website": "https://www.example.com/", + "caa-identities": ["example.com"] + } +} +` + +func newHTTPTestServer(f func(w http.ResponseWriter, r *http.Request)) *httptest.Server { + ts := httptest.NewServer(http.HandlerFunc(f)) + return ts +} + +func httpDirTestServer() *httptest.Server { + return newHTTPTestServer(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + http.Error(w, testDirResponseText, http.StatusOK) + }) +} + +const testPrivateKeyText = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA8XXIc0dO5okTzukP2USWm5tbxY6FQzzvBbOpxIfVpdKpZcKV +HfemqCZEIGu/3P3gI6rAYmDRYvLsbKSjKA5EzuUvVxrLzqPZyFI5mzF0gGEzEvYk +Z+mCPLsS5VwaXCySiz6vIBpItw6KOHByt5v8iMtgppGmjWX5N2oeVZ5314xmXFV3 +OMlniyC1uLk6Y/bVtv/vK1mOATXP5vejpjBHdk/VYTTXRZp3zSZllKJbtbt2CxY4 +eA55oCc9cfF46rNPsAsiH5iGbBFIIDSqscukZ9BtBZUj+kO+63he0SedzppuosKi +i9YtjgG1Mb81vgMFZ/SQeiR5FONWcH61jTSkiQIDAQABAoIBAQDJVYK8zLq3c5k2 
+sBLtAUnrmhFdm0b3F7neMT7fhrvYtt1U4njgMf2eu7mWpwGmTXI1i007OqudLB2D +QYxh+/PX6DYfFVLXjLwtUpKCGyyfV2z05JTaqFRWO064PKImNWxD+xKfXAtByDfs +c6bT/pcFoT+H5G7R/DNfx3ZfwfD/oo2aUCQT8PrwzQ9cjJuLYzu5Dwma29Cxtajd +Gsdrd09Qkm0PCM3c0FHG7fV3zq5SNw53tP0U0lNzSzpRiS6wmLAPDy3CcKGaj+9K +5YIE3OoQKRFn7hQkHxgnZlBJJU2BOBAOMJA6s28iRNy3pQOzR0M2kqf+YTQk/i13 +if2/mvU5AoGBAPtT9XVbOu6U4Q9WyBSi5nI4AG7gHeJtPC2UWUeaCdj5FJlrEkeD +QZTzqT9KUNu5PfwgsCzCeAzZavQKXDXq7yAtIBIC8bK2sIGhM+bz7Nbu9fPrtmV0 +uk5Enlpi2Y/pUFrRTn27FghZAEgWWUF2Drq0kThEZka3jXveBZ7KaHnfAoGBAPXy +3TVsw0Y34ZljmbsHAyT90ZG7FnA3PDXXHOZxEIDo89m8OTGeBW4eqhLvKa3t+thM +oUGyWTtrjKLELuGa8fiDpKq1b8NJqQYB4V0NJlfOYZ6G8Q+hrT+jXTC4+Lb7kmJq +tyIODlyg4B0GQLbFBZXc4FkwWZXxDT+JwKynh36XAoGBAOWsGhm+3yH755fO5FUH +cLRcPPkV0fmDfYThlpz6RZmENbDlyfSUHDB0Yuw1i6Lfq6dmb9jXdkG3xidx+EZF +hXTQCAitrBZ3IOG1YOrjakIYaacYdrxMaZzw1A0hXFRJEGeN8r6vYzkJrFo0IijS +LC+upy7WQujJAIB7qoMr0UHdAoGAEHTEikuRsUQR6zJ32cS5WCNHf2m2MaHwfGW9 +QEn2Ybm0fzAR35kEIf8ZQBUSg9m1e/18mKm3QLuMeGOKA3xbjlY4kVd8d+OY1JcR +nilAFIXxkCrVPEeEEQr8NENcGNoyTDV5tWSdX2NAO5DsiY4bNpDFzhHnHJo5WbP8 +2VCIR1cCgYAtcMtavC0nIPxmMEkpd9k+0qWcYclt73wr71sQ+kGOU1/M4g8SZh2z +QmXDkRpJf+/xpaeknf6bj6x2r7FXfVoG5vNdB+Cdn3uepkRHPLSStTIwpPVTsQVy +aTVLTgFnTNMM8whCrfR4eBwHVJIejHiA3cl5Ocq/J6u4kgtFkfwKaQ== +-----END RSA PRIVATE KEY----- +` + +// testBadCertBundleText is just simply the LE test intermediate cert +const testBadCertBundleText = ` +-----BEGIN CERTIFICATE----- +MIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw +GjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2 +MDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0 +8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym +oLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0 +ZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN +xDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56 +dhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9 +AgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw +HQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0 +BggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu +b3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu +Y3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq +hkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF +UGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9 +AFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp +DQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7 +IkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf +zWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI +PTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w +SVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em +2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0 +WzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt +n5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU= +-----END CERTIFICATE----- +` + +const testRegJSONText = ` +{ + "resource": "reg", + "id": 224403, + "key": { + "kty": "RSA", + "use": "sig", + "n": 
"n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw", + "e": "AQAB" + }, + "contact": ["mailto:nobody@example.com"], + "agreement": "https://letsencrypt.org/documents/LE-SA-v1.0.1-July-27-2015.pdf" +} +` + +func testRegData() *acme.RegistrationResource { + reg := acme.RegistrationResource{} + body := acme.Registration{} + err := json.Unmarshal([]byte(testRegJSONText), &body) + if err != nil { + panic(fmt.Errorf("Error reading JSON for registration body: %s", err.Error())) + } + reg.Body = body + reg.URI = "https://acme-staging.api.letsencrypt.org/acme/reg/123456789" + reg.NewAuthzURL = "https://acme-staging.api.letsencrypt.org/acme/new-authz" + reg.TosURL = "https://letsencrypt.org/documents/LE-SA-v1.0.1-July-27-2015.pdf" + return ® +} + +func registrationResourceData() *schema.ResourceData { + r := &schema.Resource{ + Schema: registrationSchemaFull(), + } + d := r.TestResourceData() + + d.SetId("regurl") + d.Set("server_url", "https://acme-staging.api.letsencrypt.org/directory") + d.Set("account_key_pem", testPrivateKeyText) + d.Set("email_address", "nobody@example.com") + d.Set("registration_body", testRegJSONText) + d.Set("registration_url", "https://acme-staging.api.letsencrypt.org/acme/reg/123456789") + d.Set("registration_new_authz_url", "https://acme-staging.api.letsencrypt.org/acme/new-authz") + d.Set("registration_tos_url", "https://letsencrypt.org/documents/LE-SA-v1.0.1-July-27-2015.pdf") + + return d +} + +func blankBaseResource() *schema.ResourceData { + r := &schema.Resource{ + Schema: baseACMESchema(), + } + d := r.TestResourceData() + d.Set("account_key_pem", testPrivateKeyText) + return d +} + +func blankCertificateResource() *schema.ResourceData { + r := &schema.Resource{ + Schema: certificateSchemaFull(), + } + d := r.TestResourceData() + return d +} + +func TestACME_registrationSchemaFull(t *testing.T) { + m := registrationSchemaFull() + fields := []string{"email_address", "registration_body", "registration_url", "registration_new_authz_url", "registration_tos_url"} + for _, v := range fields { + if _, ok := m[v]; ok == false { + t.Fatalf("Expected %s to be present", v) + } + } +} + +func TestACME_certificateSchema(t *testing.T) { + m := certificateSchemaFull() + fields := []string{ + "common_name", + "subject_alternative_names", + "key_type", + "certificate_request_pem", + "min_days_remaining", + "dns_challenge", + "http_challenge_port", + "tls_challenge_port", + "registration_url", + "certificate_domain", + "certificate_url", + "account_ref", + "private_key_pem", + "certificate_pem", + } + for _, v := range fields { + if _, ok := m[v]; ok == false { + t.Fatalf("Expected %s to be present", v) + } + } +} + +func TestACME_expandACMEUser(t *testing.T) { + d := registrationResourceData() + u, err := expandACMEUser(d) + if err != nil { + t.Fatalf("fatal: %s", err.Error()) + } + + if u.GetEmail() != "nobody@example.com" { + t.Fatalf("Expected email to be nobody@example.com, got %s", u.GetEmail()) + } + + key, err := privateKeyFromPEM([]byte(testPrivateKeyText)) + if err != nil { + t.Fatalf("fatal: %s", err.Error()) + } + + if reflect.DeepEqual(key, u.GetPrivateKey()) == false { + t.Fatalf("Expected private key to be %#v, got %#v", key, u.GetPrivateKey()) + } +} + +func 
TestACME_expandACMEUser_badKey(t *testing.T) { + d := registrationResourceData() + d.Set("account_key_pem", "bad") + _, err := expandACMEUser(d) + if err == nil { + t.Fatalf("expected error due to bad key") + } +} + +func TestACME_expandACMERegistration(t *testing.T) { + d := registrationResourceData() + actual, ok := expandACMERegistration(d) + if ok == false { + t.Fatalf("Got error while expanding registration") + } + + expected := testRegData() + if reflect.DeepEqual(expected, actual) == false { + t.Fatalf("Expected %#v, got %#v", expected, actual) + } +} + +func TestACME_expandACMERegistration_noBody(t *testing.T) { + d := registrationResourceData() + d.Set("registration_body", "") + if _, ok := expandACMERegistration(d); ok { + t.Fatalf("Expected error due to empty body") + } +} + +func TestACME_expandACMERegistration_badBody(t *testing.T) { + d := registrationResourceData() + d.Set("registration_body", "foo") + if _, ok := expandACMERegistration(d); ok { + t.Fatalf("Expected error due to bad body data") + } +} + +func TestACME_expandACMERegistration_noRegURL(t *testing.T) { + d := registrationResourceData() + d.Set("registration_url", "") + if _, ok := expandACMERegistration(d); ok { + t.Fatalf("Expected error due to empty reg URL") + } +} + +func TestACME_expandACMERegistration_noNewAuthZ(t *testing.T) { + d := registrationResourceData() + d.Set("registration_new_authz_url", "") + if _, ok := expandACMERegistration(d); ok { + t.Fatalf("Expected error due to empty new-authz URL") + } +} + +func TestACME_expandACMERegistration_noTOS(t *testing.T) { + d := registrationResourceData() + d.Set("registration_tos_url", "") + if _, ok := expandACMERegistration(d); ok == false { + t.Fatalf("Blank TOS URL should have been OK") + } +} + +func TestACME_expandACMEClient_badKey(t *testing.T) { + d := registrationResourceData() + d.Set("account_key_pem", "bad") + _, _, err := expandACMEClient(d, "") + if err == nil { + t.Fatalf("expected error due to bad key") + } +} + +func TestACME_expandACMEClient_badURL(t *testing.T) { + d := registrationResourceData() + d.Set("server_url", "bad://") + _, _, err := expandACMEClient(d, "") + if err == nil { + t.Fatalf("expected error due to bad URL") + } +} + +func TestACME_expandACMEClient_badRegURL(t *testing.T) { + d := registrationResourceData() + _, _, err := expandACMEClient(d, "bad://") + if err == nil { + t.Fatalf("expected error due to bad reg URL") + } +} + +func TestACME_expandACMEClient_noCertData(t *testing.T) { + c := acme.CertificateResource{} + _, err := certDaysRemaining(c) + if err == nil { + t.Fatalf("expected error due to bad cert data") + } +} + +func TestACME_parsePEMBundle_noData(t *testing.T) { + b := []byte{} + _, err := parsePEMBundle(b) + if err == nil { + t.Fatalf("expected error due to no PEM data") + } +} + +func TestACME_setDNSChallenge_noProvider(t *testing.T) { + m := make(map[string]interface{}) + d := blankBaseResource() + ts := httpDirTestServer() + d.Set("server_url", ts.URL) + client, _, err := expandACMEClient(d, "") + if err != nil { + t.Fatalf("fatal: %s", err.Error()) + } + + err = setDNSChallenge(client, m) + if err == nil { + t.Fatalf("should have errored due to no provider supplied") + } +} + +func TestACME_setDNSChallenge_unsuppotedProvider(t *testing.T) { + m := map[string]interface{}{ + "provider": "foo", + } + d := blankBaseResource() + ts := httpDirTestServer() + d.Set("server_url", ts.URL) + client, _, err := expandACMEClient(d, "") + if err != nil { + t.Fatalf("fatal: %s", err.Error()) + } + + err = 
setDNSChallenge(client, m) + if err == nil { + t.Fatalf("should have errored due to unknown provider") + } +} + +func TestACME_saveCertificateResource_badCert(t *testing.T) { + b := testBadCertBundleText + c := acme.CertificateResource{ + Certificate: []byte(b), + } + d := blankCertificateResource() + err := saveCertificateResource(d, c) + if err == nil { + t.Fatalf("expected error due to bad cert data") + } +} + +func TestACME_certDaysRemaining_CACert(t *testing.T) { + b := testBadCertBundleText + c := acme.CertificateResource{ + Certificate: []byte(b), + } + _, err := certDaysRemaining(c) + if err == nil { + t.Fatalf("expected error due to cert being a CA") + } +} + +func TestACME_splitPEMBundle_noData(t *testing.T) { + b := []byte{} + _, _, err := splitPEMBundle(b) + if err == nil { + t.Fatalf("expected error due to no PEM data") + } +} + +func TestACME_splitPEMBundle_CAFirst(t *testing.T) { + b := testBadCertBundleText + testBadCertBundleText + _, _, err := splitPEMBundle([]byte(b)) + if err == nil { + t.Fatalf("expected error due to CA cert being first") + } +} + +func TestACME_splitPEMBundle_singleCert(t *testing.T) { + b := testBadCertBundleText + _, _, err := splitPEMBundle([]byte(b)) + if err == nil { + t.Fatalf("expected error due to only one cert being present") + } +} + +func TestACME_validateKeyType(t *testing.T) { + s := "2048" + + _, errs := validateKeyType(s, "key_type") + if len(errs) > 0 { + t.Fatalf("bad: %#v", errs) + } +} + +func TestACME_validateKeyType_invalid(t *testing.T) { + s := "512" + + _, errs := validateKeyType(s, "key_type") + if len(errs) < 1 { + t.Fatalf("should have given an error") + } +} + +func TestACME_validateDNSChallengeConfig(t *testing.T) { + m := map[string]interface{}{ + "AWS_FOO": "bar", + } + + _, errs := validateDNSChallengeConfig(m, "config") + if len(errs) > 0 { + t.Fatalf("bad: %#v", errs) + } +} + +func TestACME_validateDNSChallengeConfig_invalid(t *testing.T) { + s := map[string]interface{}{ + "AWS_FOO": 1, + } + + _, errs := validateDNSChallengeConfig(s, "config") + if len(errs) < 1 { + t.Fatalf("should have given an error") + } +} diff --git a/builtin/providers/acme/provider.go b/builtin/providers/acme/provider.go new file mode 100644 index 000000000000..a67bde464594 --- /dev/null +++ b/builtin/providers/acme/provider.go @@ -0,0 +1,17 @@ +package acme + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns the terraform.ResourceProvider structure for the ACME +// provider. 
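The patch registers the provider in-tree via command/internal_plugin_list.go, but for reference, an out-of-tree build could serve the same Provider function through Terraform's plugin helper. A sketch, assuming the standard plugin.Serve entry point (not part of this patch):

package main

import (
	"github.com/hashicorp/terraform/builtin/providers/acme"
	"github.com/hashicorp/terraform/plugin"
)

func main() {
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: acme.Provider,
	})
}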
+func Provider() terraform.ResourceProvider { + return &schema.Provider{ + ResourcesMap: map[string]*schema.Resource{ + "acme_registration": resourceACMERegistration(), + "acme_certificate": resourceACMECertificate(), + }, + } +} diff --git a/builtin/providers/acme/provider_test.go b/builtin/providers/acme/provider_test.go new file mode 100644 index 000000000000..169aa06dd030 --- /dev/null +++ b/builtin/providers/acme/provider_test.go @@ -0,0 +1,32 @@ +package acme + +import ( + "testing" + + "github.com/hashicorp/terraform/builtin/providers/aws" + "github.com/hashicorp/terraform/builtin/providers/tls" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProvider *schema.Provider +var testAccProviders map[string]terraform.ResourceProvider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "acme": testAccProvider, + "tls": tls.Provider().(*schema.Provider), + "aws": aws.Provider().(*schema.Provider), + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} diff --git a/builtin/providers/acme/resource_acme_certificate.go b/builtin/providers/acme/resource_acme_certificate.go new file mode 100644 index 000000000000..a4fb80c23760 --- /dev/null +++ b/builtin/providers/acme/resource_acme_certificate.go @@ -0,0 +1,169 @@ +package acme + +import ( + "fmt" + "log" + "strconv" + "strings" + "time" + + "golang.org/x/crypto/ocsp" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/xenolf/lego/acme" +) + +func resourceACMECertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceACMECertificateCreate, + Read: resourceACMECertificateRead, + Delete: resourceACMECertificateDelete, + + Schema: certificateSchemaFull(), + } +} + +func resourceACMECertificateCreate(d *schema.ResourceData, meta interface{}) error { + client, _, err := expandACMEClient(d, d.Get("registration_url").(string)) + if err != nil { + return err + } + + if v, ok := d.GetOk("dns_challenge"); ok { + setDNSChallenge(client, v.(*schema.Set).List()[0].(map[string]interface{})) + } else { + client.SetHTTPAddress(":" + strconv.Itoa(d.Get("http_challenge_port").(int))) + client.SetTLSAddress(":" + strconv.Itoa(d.Get("tls_challenge_port").(int))) + } + + var cert acme.CertificateResource + var errs map[string]error + + if v, ok := d.GetOk("certificate_request_pem"); ok { + csr, err := csrFromPEM([]byte(v.(string))) + if err != nil { + return err + } + cert, errs = client.ObtainCertificateForCSR(*csr, true) + } else { + cn := d.Get("common_name").(string) + domains := []string{cn} + if s, ok := d.GetOk("subject_alternative_names"); ok { + for _, v := range stringSlice(s.(*schema.Set).List()) { + if v == cn { + return fmt.Errorf("common name %s should not appear in SAN list", v) + } + domains = append(domains, v) + } + } + + cert, errs = client.ObtainCertificate(domains, true, nil) + } + + if len(errs) > 0 { + messages := []string{} + for k, v := range errs { + messages = append(messages, fmt.Sprintf("%s: %s", k, v)) + } + return fmt.Errorf("Errors were encountered creating the certificate:\n %s", strings.Join(messages, "\n ")) + } + + // done! 
save the cert + saveCertificateResource(d, cert) + + return nil +} + +// resourceACMECertificateRead renews the certificate if it is close to expiry. +// This value is controlled by the min_days_remaining attribute - if this value +// less than zero, the certificate is never renewed. +func resourceACMECertificateRead(d *schema.ResourceData, meta interface{}) error { + mindays := d.Get("min_days_remaining").(int) + if mindays < 0 { + log.Printf("[WARN] min_days_remaining is set to less than 0, certificate will never be renewed") + return nil + } + + client, _, err := expandACMEClient(d, d.Get("registration_url").(string)) + if err != nil { + return err + } + + cert := expandCertificateResource(d) + remaining, err := certDaysRemaining(cert) + if err != nil { + return err + } + + if int64(mindays) >= remaining { + if v, ok := d.GetOk("dns_challenge"); ok { + setDNSChallenge(client, v.(*schema.Set).List()[0].(map[string]interface{})) + } else { + client.SetHTTPAddress(":" + strconv.Itoa(d.Get("http_challenge_port").(int))) + client.SetTLSAddress(":" + strconv.Itoa(d.Get("tls_challenge_port").(int))) + } + newCert, err := client.RenewCertificate(cert, true) + if err != nil { + return err + } + saveCertificateResource(d, newCert) + } + + return nil +} + +// resourceACMECertificateDelete "deletes" the certificate by revoking it. +func resourceACMECertificateDelete(d *schema.ResourceData, meta interface{}) error { + client, _, err := expandACMEClient(d, d.Get("registration_url").(string)) + if err != nil { + return err + } + + cert, ok := d.GetOk("certificate_pem") + + if ok { + err = client.RevokeCertificate([]byte(cert.(string))) + if err != nil { + return err + } + } + + // Add a state waiter for the OCSP status of the cert, to make sure it's + // truly revoked. + state := &resource.StateChangeConf{ + Pending: []string{"Good"}, + Target: []string{"Revoked"}, + Refresh: resourceACMECertificateRevokeRefreshFunc(cert.(string)), + Timeout: 3 * time.Minute, + MinTimeout: 15 * time.Second, + Delay: 5 * time.Second, + } + + _, err = state.WaitForState() + if err != nil { + return fmt.Errorf("Cert did not revoke: %s", err.Error()) + } + + d.SetId("") + return nil +} + +// resourceACMECertificateRevokeRefreshFunc polls the certificate's status +// via the OSCP url and returns success once it's Revoked. 
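The delete flow above waits on OCSP status with a resource.StateChangeConf. To make the Pending/Target mechanics clear without real OCSP traffic, a sketch (illustrative only) that drives the same kind of waiter with a stubbed refresh function:

func exampleWaitForRevocation() error {
	polls := 0
	state := &resource.StateChangeConf{
		Pending: []string{"Good"},
		Target:  []string{"Revoked"},
		Refresh: func() (interface{}, string, error) {
			// Report "Good" twice, then flip to "Revoked".
			polls++
			if polls < 3 {
				return "stub-cert", "Good", nil
			}
			return "stub-cert", "Revoked", nil
		},
		Timeout:    time.Minute,
		MinTimeout: time.Second,
		Delay:      time.Second,
	}
	_, err := state.WaitForState()
	return err
}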
+func resourceACMECertificateRevokeRefreshFunc(cert string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + _, resp, err := acme.GetOCSPForCert([]byte(cert)) + if err != nil { + return nil, "", fmt.Errorf("Bad: %s", err.Error()) + } + switch resp.Status { + case ocsp.Revoked: + return cert, "Revoked", nil + case ocsp.Good: + return cert, "Good", nil + default: + return nil, "", fmt.Errorf("Bad status: OCSP status %d", resp.Status) + } + } +} diff --git a/builtin/providers/acme/resource_acme_certificate_test.go b/builtin/providers/acme/resource_acme_certificate_test.go new file mode 100644 index 000000000000..01596270a6e0 --- /dev/null +++ b/builtin/providers/acme/resource_acme_certificate_test.go @@ -0,0 +1,458 @@ +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "fmt" + "log" + "os" + "reflect" + "testing" + + "golang.org/x/crypto/ocsp" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/xenolf/lego/acme" +) + +func TestAccACMECertificate_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckCert(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckACMECertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccACMECertificateConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckACMECertificateValid("acme_certificate.certificate", "www", "www2"), + ), + }, + }, + }) +} + +func TestAccACMECertificate_CSR(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckCert(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckACMECertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccACMECertificateCSRConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckACMECertificateValid("acme_certificate.certificate", "www3", "www4"), + ), + }, + }, + }) +} + +func TestAccACMECertificate_withDNSProviderConfig(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckCert(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckACMECertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccACMECertificateWithDNSProviderConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckACMECertificateValid("acme_certificate.certificate", "www5", ""), + ), + }, + }, + }) +} + +func TestAccACMECertificate_forceRenewal(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckCert(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckACMECertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccACMECertificateForceRenewalConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckACMECertificateValid("acme_certificate.certificate", "www6", ""), + ), + }, + }, + }) +} + +func TestAccACMECertificate_httpChallenge(t *testing.T) { + if v := os.Getenv("ACME_HTTP_R53_ZONE_ID"); v == "" { + t.Skip("ACME_HTTP_R53_ZONE_ID not set - skipping") + } + if v := os.Getenv("ACME_HTTP_HOST_IP"); v == "" { + t.Skip("ACME_HTTP_HOST_IP not set - skipping") + } + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckCert(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckACMECertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccACMECertificateHTTPChallengeConfig(), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckACMECertificateValid("acme_certificate.certificate", "www7", ""), + ), + }, + }, + }) +} + +func testAccCheckACMECertificateValid(n, cn, san string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find ACME certificate: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("ACME certificate ID not set") + } + + cert := rs.Primary.Attributes["certificate_pem"] + issuer := rs.Primary.Attributes["issuer_pem"] + key := rs.Primary.Attributes["private_key_pem"] + x509Certs, err := parsePEMBundle([]byte(cert)) + if err != nil { + return err + } + x509Cert := x509Certs[0] + + issuerCerts, err := parsePEMBundle([]byte(issuer)) + if err != nil { + return err + } + issuerCert := issuerCerts[0] + + // Skip the private key test if we have an empty key. This is a legit case + // that comes up when a CSR is supplied instead of creating a cert from + // scratch. + if key != "" { + privateKey, err := privateKeyFromPEM([]byte(key)) + if err != nil { + return err + } + + var privPub crypto.PublicKey + + switch v := privateKey.(type) { + case *rsa.PrivateKey: + privPub = v.Public() + case *ecdsa.PrivateKey: + privPub = v.Public() + } + + if reflect.DeepEqual(x509Cert.PublicKey, privPub) != true { + return fmt.Errorf("Public key for cert and private key don't match: %#v, %#v", x509Cert.PublicKey, privPub) + } + } + + // Ensure the issuer cert is a CA cert + if issuerCert.IsCA == false { + return fmt.Errorf("issuer_pem is not a CA certificate") + } + + // domains + domain := "." + os.Getenv("ACME_CERT_DOMAIN") + expectedCN := cn + domain + var expectedSANs []string + if san != "" { + expectedSANs = []string{cn + domain, san + domain} + } else { + expectedSANs = []string{cn + domain} + } + + actualCN := x509Cert.Subject.CommonName + actualSANs := x509Cert.DNSNames + + if expectedCN != actualCN { + return fmt.Errorf("Expected common name to be %s, got %s", expectedCN, actualCN) + } + + if reflect.DeepEqual(expectedSANs, actualSANs) != true { + return fmt.Errorf("Expected SANs to be %#v, got %#v", expectedSANs, actualSANs) + } + + return nil + } +} + +func testAccCheckACMECertificateDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "acme_certificate" { + continue + } + + cert := rs.Primary.Attributes["certificate_pem"] + _, resp, err := acme.GetOCSPForCert([]byte(cert)) + if err != nil { + return err + } + + if resp.Status != ocsp.Revoked { + return fmt.Errorf("Expected OCSP status to be %d, got %d", ocsp.Revoked, resp.Status) + } + + return nil + } + return fmt.Errorf("acme_certificate resource not found") +} + +func testAccPreCheckCert(t *testing.T) { + if v := os.Getenv("ACME_EMAIL_ADDRESS"); v == "" { + t.Fatal("ACME_EMAIL_ADDRESS must be set for the certificate acceptance test") + } + if v := os.Getenv("ACME_CERT_DOMAIN"); v == "" { + t.Fatal("ACME_CERT_DOMAIN must be set for the certificate acceptance test") + } + if v := os.Getenv("AWS_PROFILE"); v == "" { + if v := os.Getenv("AWS_ACCESS_KEY_ID"); v == "" { + t.Fatal("AWS_ACCESS_KEY_ID must be set for the certificate acceptance test") + } + if v := os.Getenv("AWS_SECRET_ACCESS_KEY"); v == "" { + t.Fatal("AWS_SECRET_ACCESS_KEY must be set for the certificate acceptance test") + } + } + if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" { + log.Println("[INFO] Test: Using us-west-2 as test region") + os.Setenv("AWS_DEFAULT_REGION", "us-west-2") + } +} + +func testAccACMECertificateConfig() string { 
+ return fmt.Sprintf(` +variable "email_address" { + default = "%s" +} + +variable "domain" { + default = "%s" +} + +resource "tls_private_key" "private_key" { + algorithm = "RSA" +} + +resource "acme_registration" "reg" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + email_address = "${var.email_address}" +} + +resource "acme_certificate" "certificate" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + common_name = "www.${var.domain}" + subject_alternative_names = ["www2.${var.domain}"] + + dns_challenge { + provider = "route53" + } + + registration_url = "${acme_registration.reg.id}" +} +`, os.Getenv("ACME_EMAIL_ADDRESS"), os.Getenv("ACME_CERT_DOMAIN")) +} + +func testAccACMECertificateCSRConfig() string { + return fmt.Sprintf(` +variable "email_address" { + default = "%s" +} + +variable "domain" { + default = "%s" +} + +resource "tls_private_key" "reg_private_key" { + algorithm = "RSA" +} + +resource "acme_registration" "reg" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.reg_private_key.private_key_pem}" + email_address = "${var.email_address}" +} + +resource "tls_private_key" "cert_private_key" { + algorithm = "RSA" +} + +resource "tls_cert_request" "req" { + key_algorithm = "RSA" + private_key_pem = "${tls_private_key.cert_private_key.private_key_pem}" + dns_names = ["www3.${var.domain}", "www4.${var.domain}"] + + subject { + common_name = "www3.${var.domain}" + } +} + +resource "acme_certificate" "certificate" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.reg_private_key.private_key_pem}" + certificate_request_pem = "${tls_cert_request.req.cert_request_pem}" + + dns_challenge { + provider = "route53" + } + + registration_url = "${acme_registration.reg.id}" +} +`, os.Getenv("ACME_EMAIL_ADDRESS"), os.Getenv("ACME_CERT_DOMAIN")) +} + +func testAccACMECertificateWithDNSProviderConfig() string { + return fmt.Sprintf(` +variable "email_address" { + default = "%s" +} + +variable "domain" { + default = "%s" +} + +resource "tls_private_key" "private_key" { + algorithm = "RSA" +} + +resource "acme_registration" "reg" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + email_address = "${var.email_address}" +} + +resource "acme_certificate" "certificate" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + common_name = "www5.${var.domain}" + + dns_challenge { + provider = "route53" + config { + "AWS_PROFILE" = "%s" + "AWS_ACCESS_KEY_ID " = "%s" + "AWS_SECRET_ACCESS_KEY " = "%s" + "AWS_SESSION_TOKEN" = "%s" + } + } + + registration_url = "${acme_registration.reg.id}" +} +`, os.Getenv("ACME_EMAIL_ADDRESS"), os.Getenv("ACME_CERT_DOMAIN"), os.Getenv("AWS_PROFILE"), os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), os.Getenv("AWS_SESSION_TOKEN")) +} + +func testAccACMECertificateForceRenewalConfig() string { + return fmt.Sprintf(` +variable "email_address" { + default = "%s" +} + +variable "domain" { + default = "%s" +} + +resource "tls_private_key" "private_key" { + algorithm = "RSA" +} + +resource "acme_registration" "reg" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + 
account_key_pem = "${tls_private_key.private_key.private_key_pem}" + email_address = "${var.email_address}" +} + +resource "acme_certificate" "certificate" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + common_name = "www6.${var.domain}" + min_days_remaining = 720 + + dns_challenge { + provider = "route53" + } + + registration_url = "${acme_registration.reg.id}" +} +`, os.Getenv("ACME_EMAIL_ADDRESS"), os.Getenv("ACME_CERT_DOMAIN")) +} + +func testAccACMECertificateECKeyCertConfig() string { + return fmt.Sprintf(` +variable "email_address" { + default = "%s" +} + +variable "domain" { + default = "%s" +} + +resource "tls_private_key" "private_key" { + algorithm = "RSA" +} + +resource "acme_registration" "reg" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + email_address = "${var.email_address}" +} + +resource "acme_certificate" "certificate" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + common_name = "www7.${var.domain}" + + dns_challenge { + provider = "route53" + } + + registration_url = "${acme_registration.reg.id}" +} +`, os.Getenv("ACME_EMAIL_ADDRESS"), os.Getenv("ACME_CERT_DOMAIN")) +} + +func testAccACMECertificateHTTPChallengeConfig() string { + return fmt.Sprintf(` +variable "email_address" { + default = "%s" +} + +variable "domain" { + default = "%s" +} + +resource "tls_private_key" "private_key" { + algorithm = "RSA" +} + +resource "acme_registration" "reg" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + email_address = "${var.email_address}" +} + +resource "acme_certificate" "certificate" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + common_name = "www7.${var.domain}" + + registration_url = "${acme_registration.reg.id}" + + depends_on = ["aws_route53_record.www7"] + +} + +resource "aws_route53_record" "www7" { + zone_id = "%s" + name = "www7" + type = "A" + ttl = "10" + records = ["%s"] +} +`, os.Getenv("ACME_EMAIL_ADDRESS"), os.Getenv("ACME_CERT_DOMAIN"), os.Getenv("ACME_HTTP_R53_ZONE_ID"), os.Getenv("ACME_HTTP_HOST_IP")) +} diff --git a/builtin/providers/acme/resource_acme_registration.go b/builtin/providers/acme/resource_acme_registration.go new file mode 100644 index 000000000000..77c5b6a8ddd9 --- /dev/null +++ b/builtin/providers/acme/resource_acme_registration.go @@ -0,0 +1,49 @@ +package acme + +import "github.com/hashicorp/terraform/helper/schema" + +func resourceACMERegistration() *schema.Resource { + return &schema.Resource{ + Create: resourceACMERegistrationCreate, + Read: resourceACMERegistrationRead, + Delete: resourceACMERegistrationDelete, + + Schema: registrationSchemaFull(), + } +} + +func resourceACMERegistrationCreate(d *schema.ResourceData, meta interface{}) error { + // register and agree to the TOS + client, user, err := expandACMEClient(d, "") + if err != nil { + return err + } + reg, err := client.Register() + if err != nil { + return err + } + user.Registration = reg + err = client.AgreeToTOS() + if err != nil { + return err + } + + // save the reg + err = saveACMERegistration(d, reg) + if err != nil { + return err + } + + return nil +} + +func resourceACMERegistrationRead(d 
*schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceACMERegistrationDelete(d *schema.ResourceData, meta interface{}) error { + // TODO: Add deletion support using *acme.Client.DeleteRegistration(). + // I had this, but I think I jumped the gun on it still being in draft :) + d.SetId("") + return nil +} diff --git a/builtin/providers/acme/resource_acme_registration_test.go b/builtin/providers/acme/resource_acme_registration_test.go new file mode 100644 index 000000000000..57fb94936e96 --- /dev/null +++ b/builtin/providers/acme/resource_acme_registration_test.go @@ -0,0 +1,99 @@ +package acme + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccACMERegistration_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckReg(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccACMERegistrationConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckACMERegistrationValid("acme_registration.reg"), + ), + }, + }, + }) +} + +func testAccCheckACMERegistrationValid(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find ACME registration: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("ACME registration ID not set") + } + + d := testAccCheckACMERegistrationResourceData(rs) + + client, _, err := expandACMEClient(d, d.Get("registration_url").(string)) + if err != nil { + return fmt.Errorf("Could not build ACME client off reg: %s", err.Error()) + } + reg, err := client.QueryRegistration() + if err != nil { + return fmt.Errorf("Error on reg query: %s", err.Error()) + } + + actual := reg.URI + expected := rs.Primary.ID + + if actual != expected { + return fmt.Errorf("Expected ID to be %s, got %s", expected, actual) + } + return nil + } +} + +// testAccCheckACMERegistrationResourceData returns a *schema.ResourceData that should match a +// acme_registration resource. 
+func testAccCheckACMERegistrationResourceData(rs *terraform.ResourceState) *schema.ResourceData { + r := &schema.Resource{ + Schema: registrationSchemaFull(), + } + d := r.TestResourceData() + + d.SetId(rs.Primary.ID) + d.Set("server_url", rs.Primary.Attributes["server_url"]) + d.Set("account_key_pem", rs.Primary.Attributes["account_key_pem"]) + d.Set("email_address", rs.Primary.Attributes["email_address"]) + d.Set("registration_body", rs.Primary.Attributes["registration_body"]) + d.Set("registration_url", rs.Primary.Attributes["registration_url"]) + d.Set("registration_new_authz_url", rs.Primary.Attributes["registration_new_authz_url"]) + d.Set("registration_tos_url", rs.Primary.Attributes["registration_tos_url"]) + + return d +} + +func testAccPreCheckReg(t *testing.T) { + if v := os.Getenv("ACME_EMAIL_ADDRESS"); v == "" { + t.Fatal("ACME_EMAIL_ADDRESS must be set for the registration acceptance test") + } +} + +func testAccACMERegistrationConfig() string { + return fmt.Sprintf(` +resource "tls_private_key" "private_key" { + algorithm = "RSA" +} + +resource "acme_registration" "reg" { + server_url = "https://acme-staging.api.letsencrypt.org/directory" + account_key_pem = "${tls_private_key.private_key.private_key_pem}" + email_address = "%s" + +} +`, os.Getenv("ACME_EMAIL_ADDRESS")) +} diff --git a/builtin/providers/tls/resource_cert_request.go b/builtin/providers/tls/resource_cert_request.go index 267f0db39b8c..ed1ca2cdefaa 100644 --- a/builtin/providers/tls/resource_cert_request.go +++ b/builtin/providers/tls/resource_cert_request.go @@ -52,6 +52,7 @@ func resourceCertRequest() *schema.Resource { Required: true, Description: "PEM-encoded private key that the certificate will belong to", ForceNew: true, + Sensitive: true, StateFunc: func(v interface{}) string { return hashForState(v.(string)) }, diff --git a/builtin/providers/tls/resource_locally_signed_cert.go b/builtin/providers/tls/resource_locally_signed_cert.go index 39c90022f8d3..2bcfa7e1f159 100644 --- a/builtin/providers/tls/resource_locally_signed_cert.go +++ b/builtin/providers/tls/resource_locally_signed_cert.go @@ -31,6 +31,7 @@ func resourceLocallySignedCert() *schema.Resource { Required: true, Description: "PEM-encoded CA private key used to sign the certificate", ForceNew: true, + Sensitive: true, StateFunc: func(v interface{}) string { return hashForState(v.(string)) }, diff --git a/builtin/providers/tls/resource_private_key.go b/builtin/providers/tls/resource_private_key.go index 8270cc624f95..966a50821eb8 100644 --- a/builtin/providers/tls/resource_private_key.go +++ b/builtin/providers/tls/resource_private_key.go @@ -79,8 +79,9 @@ func resourcePrivateKey() *schema.Resource { }, "private_key_pem": &schema.Schema{ - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Sensitive: true, }, "public_key_pem": &schema.Schema{ diff --git a/builtin/providers/tls/resource_self_signed_cert.go b/builtin/providers/tls/resource_self_signed_cert.go index 29e04154db12..94aea79dac10 100644 --- a/builtin/providers/tls/resource_self_signed_cert.go +++ b/builtin/providers/tls/resource_self_signed_cert.go @@ -50,6 +50,7 @@ func resourceSelfSignedCert() *schema.Resource { Required: true, Description: "PEM-encoded private key that the certificate will belong to", ForceNew: true, + Sensitive: true, StateFunc: func(v interface{}) string { return hashForState(v.(string)) }, diff --git a/command/internal_plugin_list.go b/command/internal_plugin_list.go index 7e9b246c868c..590bcc3f9aae 100644 --- 
a/command/internal_plugin_list.go +++ b/command/internal_plugin_list.go @@ -6,6 +6,7 @@ package command import ( + acmeprovider "github.com/hashicorp/terraform/builtin/providers/acme" atlasprovider "github.com/hashicorp/terraform/builtin/providers/atlas" awsprovider "github.com/hashicorp/terraform/builtin/providers/aws" azureprovider "github.com/hashicorp/terraform/builtin/providers/azure" @@ -58,6 +59,7 @@ import ( ) var InternalProviders = map[string]plugin.ProviderFunc{ + "acme": acmeprovider.Provider, "atlas": atlasprovider.Provider, "aws": awsprovider.Provider, "azure": azureprovider.Provider, diff --git a/vendor/github.com/JamesClonk/vultr/LICENSE b/vendor/github.com/JamesClonk/vultr/LICENSE new file mode 100644 index 000000000000..be20a12806d6 --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Fabio Berchtold + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/JamesClonk/vultr/lib/account_info.go b/vendor/github.com/JamesClonk/vultr/lib/account_info.go new file mode 100644 index 000000000000..8568461ee303 --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/account_info.go @@ -0,0 +1,59 @@ +package lib + +import ( + "encoding/json" + "fmt" + "strconv" +) + +// AccountInfo of Vultr account +type AccountInfo struct { + Balance float64 `json:"balance"` + PendingCharges float64 `json:"pending_charges"` + LastPaymentDate string `json:"last_payment_date"` + LastPaymentAmount float64 `json:"last_payment_amount"` +} + +// GetAccountInfo retrieves the Vultr account information about current balance, pending charges, etc.. +func (c *Client) GetAccountInfo() (info AccountInfo, err error) { + if err := c.get(`account/info`, &info); err != nil { + return AccountInfo{}, err + } + return +} + +// UnmarshalJSON implements json.Unmarshaller on AccountInfo. +// This is needed because the Vultr API is inconsistent in it's JSON responses for account info. +// Some fields can change type, from JSON number to JSON string and vice-versa. 
+func (a *AccountInfo) UnmarshalJSON(data []byte) (err error) { + if a == nil { + *a = AccountInfo{} + } + + var fields map[string]interface{} + if err := json.Unmarshal(data, &fields); err != nil { + return err + } + + b, err := strconv.ParseFloat(fmt.Sprintf("%v", fields["balance"]), 64) + if err != nil { + return err + } + a.Balance = b + + pc, err := strconv.ParseFloat(fmt.Sprintf("%v", fields["pending_charges"]), 64) + if err != nil { + return err + } + a.PendingCharges = pc + + lpa, err := strconv.ParseFloat(fmt.Sprintf("%v", fields["last_payment_amount"]), 64) + if err != nil { + return err + } + a.LastPaymentAmount = lpa + + a.LastPaymentDate = fmt.Sprintf("%v", fields["last_payment_date"]) + + return +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/client.go b/vendor/github.com/JamesClonk/vultr/lib/client.go new file mode 100644 index 000000000000..84044cb4b86f --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/client.go @@ -0,0 +1,224 @@ +package lib + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "strings" + "time" + + "github.com/juju/ratelimit" +) + +const ( + // Version of this libary + Version = "v1.8" + + // APIVersion of Vultr + APIVersion = "v1" + + // DefaultEndpoint to be used + DefaultEndpoint = "https://api.vultr.com/" + + mediaType = "application/json" +) + +// retryableStatusCodes are API response status codes that indicate that +// the failed request can be retried without further actions. +var retryableStatusCodes = map[int]struct{}{ + 503: {}, // Rate limit hit + 500: {}, // Internal server error. Try again at a later time. +} + +type Client struct { + // HTTP client for communication with the Vultr API + client *http.Client + + // User agent for HTTP client + UserAgent string + + // Endpoint URL for API requests + Endpoint *url.URL + + // API key for accessing the Vultr API + APIKey string + + // Max. number of request attempts + MaxAttempts int + + // Throttling struct + bucket *ratelimit.Bucket +} + +type Options struct { + // HTTP client for communication with the Vultr API + HTTPClient *http.Client + + // User agent for HTTP client + UserAgent string + + // Endpoint URL for API requests + Endpoint string + + // API rate limitation, calls per duration + RateLimitation time.Duration + + // Max. number of times to retry API calls + MaxRetries int +} + +// NewClient creates new Vultr API client. Options are optional and can be nil. 
+func NewClient(apiKey string, options *Options) *Client { + userAgent := "vultr-go/" + Version + client := http.DefaultClient + endpoint, _ := url.Parse(DefaultEndpoint) + rate := 505 * time.Millisecond + attempts := 1 + + if options != nil { + if options.HTTPClient != nil { + client = options.HTTPClient + } + if options.UserAgent != "" { + userAgent = options.UserAgent + } + if options.Endpoint != "" { + endpoint, _ = url.Parse(options.Endpoint) + } + if options.RateLimitation != 0 { + rate = options.RateLimitation + } + if options.MaxRetries != 0 { + attempts = options.MaxRetries + 1 + } + } + + return &Client{ + UserAgent: userAgent, + client: client, + Endpoint: endpoint, + APIKey: apiKey, + MaxAttempts: attempts, + bucket: ratelimit.NewBucket(rate, 1), + } +} + +func apiPath(path string) string { + return fmt.Sprintf("/%s/%s", APIVersion, path) +} + +func apiKeyPath(path, apiKey string) string { + if strings.Contains(path, "?") { + return path + "&api_key=" + apiKey + } + return path + "?api_key=" + apiKey +} + +func (c *Client) get(path string, data interface{}) error { + req, err := c.newRequest("GET", apiPath(path), nil) + if err != nil { + return err + } + return c.do(req, data) +} + +func (c *Client) post(path string, values url.Values, data interface{}) error { + req, err := c.newRequest("POST", apiPath(path), strings.NewReader(values.Encode())) + if err != nil { + return err + } + return c.do(req, data) +} + +func (c *Client) newRequest(method string, path string, body io.Reader) (*http.Request, error) { + relPath, err := url.Parse(apiKeyPath(path, c.APIKey)) + if err != nil { + return nil, err + } + + url := c.Endpoint.ResolveReference(relPath) + + req, err := http.NewRequest(method, url.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("User-Agent", c.UserAgent) + req.Header.Add("Accept", mediaType) + + if req.Method == "POST" { + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + } + return req, nil +} + +func (c *Client) do(req *http.Request, data interface{}) error { + // Throttle http requests to avoid hitting Vultr's API rate-limit + c.bucket.Wait(1) + + var apiError error + for tryCount := 1; tryCount <= c.MaxAttempts; tryCount++ { + resp, err := c.client.Do(req) + if err != nil { + return err + } + + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return err + } + + if resp.StatusCode == http.StatusOK { + if data != nil { + // avoid unmarshalling problem because Vultr API returns + // empty array instead of empty map when it shouldn't! + if string(body) == `[]` { + data = nil + } else { + if err := json.Unmarshal(body, data); err != nil { + return err + } + } + } + return nil + } + + apiError = errors.New(string(body)) + if !isCodeRetryable(resp.StatusCode) { + break + } + + delay := backoffDuration(tryCount) + time.Sleep(delay) + } + + return apiError +} + +// backoffDuration returns the duration to wait before retrying the request. +// Duration is an exponential function of the retry count with a jitter of ~0-30%. +func backoffDuration(retryCount int) time.Duration { + // Upper limit of delay at ~1 minute + if retryCount > 7 { + retryCount = 7 + } + + rand.Seed(time.Now().UnixNano()) + delay := (1 << uint(retryCount)) * (rand.Intn(150) + 500) + return time.Duration(delay) * time.Millisecond +} + +// isCodeRetryable returns true if the given status code means that we should retry. 
+func isCodeRetryable(statusCode int) bool { + if _, ok := retryableStatusCodes[statusCode]; ok { + return true + } + + return false +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/dns.go b/vendor/github.com/JamesClonk/vultr/lib/dns.go new file mode 100644 index 000000000000..f6e321e1699f --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/dns.go @@ -0,0 +1,112 @@ +package lib + +import ( + "fmt" + "net/url" +) + +// DNS Domain +type DnsDomain struct { + Domain string `json:"domain"` + Created string `json:"date_created"` +} + +// DNS Record +type DnsRecord struct { + RecordID int `json:"RECORDID"` + Type string `json:"type"` + Name string `json:"name"` + Data string `json:"data"` + Priority int `json:"priority"` + TTL int `json:"ttl"` +} + +func (c *Client) GetDnsDomains() (dnsdomains []DnsDomain, err error) { + if err := c.get(`dns/list`, &dnsdomains); err != nil { + return nil, err + } + return dnsdomains, nil +} + +func (c *Client) CreateDnsDomain(domain, serverip string) error { + values := url.Values{ + "domain": {domain}, + "serverip": {serverip}, + } + + if err := c.post(`dns/create_domain`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) DeleteDnsDomain(domain string) error { + values := url.Values{ + "domain": {domain}, + } + + if err := c.post(`dns/delete_domain`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) GetDnsRecords(domain string) (dnsrecords []DnsRecord, err error) { + if err := c.get(`dns/records?domain=`+domain, &dnsrecords); err != nil { + return nil, err + } + return dnsrecords, nil +} + +func (c *Client) CreateDnsRecord(domain, name, rtype, data string, priority, ttl int) error { + values := url.Values{ + "domain": {domain}, + "name": {name}, + "type": {rtype}, + "data": {data}, + "priority": {fmt.Sprintf("%v", priority)}, + "ttl": {fmt.Sprintf("%v", ttl)}, + } + + if err := c.post(`dns/create_record`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) UpdateDnsRecord(domain string, dnsrecord DnsRecord) error { + values := url.Values{ + "domain": {domain}, + "RECORDID": {fmt.Sprintf("%v", dnsrecord.RecordID)}, + } + + if dnsrecord.Name != "" { + values.Add("name", dnsrecord.Name) + } + if dnsrecord.Data != "" { + values.Add("data", dnsrecord.Data) + } + if dnsrecord.Priority != 0 { + values.Add("priority", fmt.Sprintf("%v", dnsrecord.Priority)) + } + if dnsrecord.TTL != 0 { + values.Add("ttl", fmt.Sprintf("%v", dnsrecord.TTL)) + } + + if err := c.post(`dns/update_record`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) DeleteDnsRecord(domain string, recordID int) error { + values := url.Values{ + "domain": {domain}, + "RECORDID": {fmt.Sprintf("%v", recordID)}, + } + + if err := c.post(`dns/delete_record`, values, nil); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/ip.go b/vendor/github.com/JamesClonk/vultr/lib/ip.go new file mode 100644 index 000000000000..e2e35ced25e8 --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/ip.go @@ -0,0 +1,118 @@ +package lib + +import "net/url" + +// IPv4 information of a virtual machine +type IPv4 struct { + IP string `json:"ip"` + Netmask string `json:"netmask"` + Gateway string `json:"gateway"` + Type string `json:"type"` + ReverseDNS string `json:"reverse"` +} + +// IPv6 information of a virtual machine +type IPv6 struct { + IP string `json:"ip"` + Network string `json:"network"` + NetworkSize string `json:"network_size"` + Type 
string `json:"type"` +} + +// ReverseDNSIPv6 information of a virtual machine +type ReverseDNSIPv6 struct { + IP string `json:"ip"` + ReverseDNS string `json:"reverse"` +} + +func (c *Client) ListIPv4(id string) (list []IPv4, err error) { + var ipMap map[string][]IPv4 + if err := c.get(`server/list_ipv4?SUBID=`+id, &ipMap); err != nil { + return nil, err + } + + for _, iplist := range ipMap { + for _, ip := range iplist { + list = append(list, ip) + } + } + return list, nil +} + +func (c *Client) ListIPv6(id string) (list []IPv6, err error) { + var ipMap map[string][]IPv6 + if err := c.get(`server/list_ipv6?SUBID=`+id, &ipMap); err != nil { + return nil, err + } + + for _, iplist := range ipMap { + for _, ip := range iplist { + list = append(list, ip) + } + } + return list, nil +} + +func (c *Client) ListIPv6ReverseDNS(id string) (list []ReverseDNSIPv6, err error) { + var ipMap map[string][]ReverseDNSIPv6 + if err := c.get(`server/reverse_list_ipv6?SUBID=`+id, &ipMap); err != nil { + return nil, err + } + + for _, iplist := range ipMap { + for _, ip := range iplist { + list = append(list, ip) + } + } + return list, nil +} + +func (c *Client) DeleteIPv6ReverseDNS(id string, ip string) error { + values := url.Values{ + "SUBID": {id}, + "ip": {ip}, + } + + if err := c.post(`server/reverse_delete_ipv6`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) SetIPv6ReverseDNS(id, ip, entry string) error { + values := url.Values{ + "SUBID": {id}, + "ip": {ip}, + "entry": {entry}, + } + + if err := c.post(`server/reverse_set_ipv6`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) DefaultIPv4ReverseDNS(id, ip string) error { + values := url.Values{ + "SUBID": {id}, + "ip": {ip}, + } + + if err := c.post(`server/reverse_default_ipv4`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) SetIPv4ReverseDNS(id, ip, entry string) error { + values := url.Values{ + "SUBID": {id}, + "ip": {ip}, + "entry": {entry}, + } + + if err := c.post(`server/reverse_set_ipv4`, values, nil); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/iso.go b/vendor/github.com/JamesClonk/vultr/lib/iso.go new file mode 100644 index 000000000000..21e57edfae21 --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/iso.go @@ -0,0 +1,23 @@ +package lib + +// ISO image on Vultr +type ISO struct { + ID int `json:"ISOID"` + Created string `json:"date_created"` + Filename string `json:"filename"` + Size int `json:"size"` + MD5sum string `json:"md5sum"` +} + +func (c *Client) GetISO() ([]ISO, error) { + var isoMap map[string]ISO + if err := c.get(`iso/list`, &isoMap); err != nil { + return nil, err + } + + var isoList []ISO + for _, iso := range isoMap { + isoList = append(isoList, iso) + } + return isoList, nil +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/os.go b/vendor/github.com/JamesClonk/vultr/lib/os.go new file mode 100644 index 000000000000..7e6ff9592cf1 --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/os.go @@ -0,0 +1,24 @@ +package lib + +// OS image on Vultr +type OS struct { + ID int `json:"OSID"` + Name string `json:"name"` + Arch string `json:"arch"` + Family string `json:"family"` + Windows bool `json:"windows"` + Surcharge string `json:"surcharge"` +} + +func (c *Client) GetOS() ([]OS, error) { + var osMap map[string]OS + if err := c.get(`os/list`, &osMap); err != nil { + return nil, err + } + + var osList []OS + for _, os := range osMap { + osList = append(osList, os) + 
} + return osList, nil +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/plans.go b/vendor/github.com/JamesClonk/vultr/lib/plans.go new file mode 100644 index 000000000000..084b59f8e879 --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/plans.go @@ -0,0 +1,35 @@ +package lib + +import "fmt" + +// Plan on Vultr +type Plan struct { + ID int `json:"VPSPLANID,string"` + Name string `json:"name"` + VCpus int `json:"vcpu_count,string"` + RAM string `json:"ram"` + Disk string `json:"disk"` + Bandwidth string `json:"bandwidth"` + Price string `json:"price_per_month"` + Regions []int `json:"available_locations"` +} + +func (c *Client) GetPlans() ([]Plan, error) { + var planMap map[string]Plan + if err := c.get(`plans/list`, &planMap); err != nil { + return nil, err + } + + var planList []Plan + for _, plan := range planMap { + planList = append(planList, plan) + } + return planList, nil +} + +func (c *Client) GetAvailablePlansForRegion(id int) (planIDs []int, err error) { + if err := c.get(fmt.Sprintf(`regions/availability?DCID=%v`, id), &planIDs); err != nil { + return nil, err + } + return +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/regions.go b/vendor/github.com/JamesClonk/vultr/lib/regions.go new file mode 100644 index 000000000000..5e8a5a98e838 --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/regions.go @@ -0,0 +1,24 @@ +package lib + +// Region on Vultr +type Region struct { + ID int `json:"DCID,string"` + Name string `json:"name"` + Country string `json:"country"` + Continent string `json:"continent"` + State string `json:"state"` + Ddos bool `json:"ddos_protection"` +} + +func (c *Client) GetRegions() ([]Region, error) { + var regionMap map[string]Region + if err := c.get(`regions/list`, ®ionMap); err != nil { + return nil, err + } + + var regionList []Region + for _, os := range regionMap { + regionList = append(regionList, os) + } + return regionList, nil +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/scripts.go b/vendor/github.com/JamesClonk/vultr/lib/scripts.go new file mode 100644 index 000000000000..2e834c81f7ce --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/scripts.go @@ -0,0 +1,110 @@ +package lib + +import ( + "encoding/json" + "fmt" + "net/url" +) + +// StartupScript on Vultr account +type StartupScript struct { + ID string `json:"SCRIPTID"` + Name string `json:"name"` + Type string `json:"type"` + Content string `json:"script"` +} + +// Implements json.Unmarshaller on StartupScript. +// Necessary because the SRIPTID field has inconsistent types. 
+func (s *StartupScript) UnmarshalJSON(data []byte) (err error) { + if s == nil { + *s = StartupScript{} + } + + var fields map[string]interface{} + if err := json.Unmarshal(data, &fields); err != nil { + return err + } + + s.ID = fmt.Sprintf("%v", fields["SCRIPTID"]) + s.Name = fmt.Sprintf("%v", fields["name"]) + s.Type = fmt.Sprintf("%v", fields["type"]) + s.Content = fmt.Sprintf("%v", fields["script"]) + + return +} + +func (c *Client) GetStartupScripts() (scripts []StartupScript, err error) { + var scriptMap map[string]StartupScript + if err := c.get(`startupscript/list`, &scriptMap); err != nil { + return nil, err + } + + for _, script := range scriptMap { + if script.Type == "" { + script.Type = "boot" // set default script type + } + scripts = append(scripts, script) + } + return scripts, nil +} + +func (c *Client) GetStartupScript(id string) (StartupScript, error) { + scripts, err := c.GetStartupScripts() + if err != nil { + return StartupScript{}, err + } + + for _, s := range scripts { + if s.ID == id { + return s, nil + } + } + return StartupScript{}, nil +} + +func (c *Client) CreateStartupScript(name, content, scriptType string) (StartupScript, error) { + values := url.Values{ + "name": {name}, + "script": {content}, + "type": {scriptType}, + } + + var script StartupScript + if err := c.post(`startupscript/create`, values, &script); err != nil { + return StartupScript{}, err + } + script.Name = name + script.Content = content + script.Type = scriptType + + return script, nil +} + +func (c *Client) UpdateStartupScript(script StartupScript) error { + values := url.Values{ + "SCRIPTID": {script.ID}, + } + if script.Name != "" { + values.Add("name", script.Name) + } + if script.Content != "" { + values.Add("script", script.Content) + } + + if err := c.post(`startupscript/update`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) DeleteStartupScript(id string) error { + values := url.Values{ + "SCRIPTID": {id}, + } + + if err := c.post(`startupscript/destroy`, values, nil); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/servers.go b/vendor/github.com/JamesClonk/vultr/lib/servers.go new file mode 100644 index 000000000000..2bb792b32f11 --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/servers.go @@ -0,0 +1,386 @@ +package lib + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strconv" +) + +// Server (virtual machine) on Vultr account +type Server struct { + ID string `json:"SUBID"` + Name string `json:"label"` + OS string `json:"os"` + RAM string `json:"ram"` + Disk string `json:"disk"` + MainIP string `json:"main_ip"` + VCpus int `json:"vcpu_count,string"` + Location string `json:"location"` + RegionID int `json:"DCID,string"` + DefaultPassword string `json:"default_password"` + Created string `json:"date_created"` + PendingCharges float64 `json:"pending_charges"` + Status string `json:"status"` + Cost string `json:"cost_per_month"` + CurrentBandwidth float64 `json:"current_bandwidth_gb"` + AllowedBandwidth float64 `json:"allowed_bandwidth_gb,string"` + NetmaskV4 string `json:"netmask_v4"` + GatewayV4 string `json:"gateway_v4"` + PowerStatus string `json:"power_status"` + ServerState string `json:"server_state"` + PlanID int `json:"VPSPLANID,string"` + V6Networks []V6Network `json:"v6_networks"` + InternalIP string `json:"internal_ip"` + KVMUrl string `json:"kvm_url"` + AutoBackups string `json:"auto_backups"` + Tag string `json:"tag"` +} + +// ServerOptions are optional 
parameters to be used during server creation +type ServerOptions struct { + IPXEChainURL string + ISO int + Script int + UserData string + Snapshot string + SSHKey string + IPV6 bool + PrivateNetworking bool + AutoBackups bool + DontNotifyOnActivate bool +} + +// V6Network represents a IPv6 network of a Vultr server +type V6Network struct { + Network string `json:"v6_network"` + MainIP string `json:"v6_main_ip"` + NetworkSize string `json:"v6_network_size"` +} + +// UnmarshalJSON implements json.Unmarshaller on Server. +// This is needed because the Vultr API is inconsistent in it's JSON responses for servers. +// Some fields can change type, from JSON number to JSON string and vice-versa. +func (s *Server) UnmarshalJSON(data []byte) (err error) { + if s == nil { + *s = Server{} + } + + var fields map[string]interface{} + if err := json.Unmarshal(data, &fields); err != nil { + return err + } + + value := fmt.Sprintf("%v", fields["vcpu_count"]) + if len(value) == 0 || value == "" { + value = "0" + } + vcpu, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + s.VCpus = int(vcpu) + + value = fmt.Sprintf("%v", fields["DCID"]) + if len(value) == 0 || value == "" { + value = "0" + } + region, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + s.RegionID = int(region) + + value = fmt.Sprintf("%v", fields["VPSPLANID"]) + if len(value) == 0 || value == "" { + value = "0" + } + plan, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + s.PlanID = int(plan) + + value = fmt.Sprintf("%v", fields["pending_charges"]) + if len(value) == 0 || value == "" { + value = "0" + } + pc, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + s.PendingCharges = pc + + value = fmt.Sprintf("%v", fields["current_bandwidth_gb"]) + if len(value) == 0 || value == "" { + value = "0" + } + cb, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + s.CurrentBandwidth = cb + + value = fmt.Sprintf("%v", fields["allowed_bandwidth_gb"]) + if len(value) == 0 || value == "" { + value = "0" + } + ab, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + s.AllowedBandwidth = ab + + s.ID = fmt.Sprintf("%v", fields["SUBID"]) + s.Name = fmt.Sprintf("%v", fields["label"]) + s.OS = fmt.Sprintf("%v", fields["os"]) + s.RAM = fmt.Sprintf("%v", fields["ram"]) + s.Disk = fmt.Sprintf("%v", fields["disk"]) + s.MainIP = fmt.Sprintf("%v", fields["main_ip"]) + s.Location = fmt.Sprintf("%v", fields["location"]) + s.DefaultPassword = fmt.Sprintf("%v", fields["default_password"]) + s.Created = fmt.Sprintf("%v", fields["date_created"]) + s.Status = fmt.Sprintf("%v", fields["status"]) + s.Cost = fmt.Sprintf("%v", fields["cost_per_month"]) + s.NetmaskV4 = fmt.Sprintf("%v", fields["netmask_v4"]) + s.GatewayV4 = fmt.Sprintf("%v", fields["gateway_v4"]) + s.PowerStatus = fmt.Sprintf("%v", fields["power_status"]) + s.ServerState = fmt.Sprintf("%v", fields["server_state"]) + + v6networks := make([]V6Network, 0) + if networks, ok := fields["v6_networks"].([]interface{}); ok { + for _, network := range networks { + if network, ok := network.(map[string]interface{}); ok { + v6network := V6Network{ + Network: fmt.Sprintf("%v", network["v6_network"]), + MainIP: fmt.Sprintf("%v", network["v6_main_ip"]), + NetworkSize: fmt.Sprintf("%v", network["v6_network_size"]), + } + v6networks = append(v6networks, v6network) + } + } + s.V6Networks = v6networks + } + + s.InternalIP = fmt.Sprintf("%v", fields["internal_ip"]) + s.KVMUrl = 
fmt.Sprintf("%v", fields["kvm_url"]) + s.AutoBackups = fmt.Sprintf("%v", fields["auto_backups"]) + s.Tag = fmt.Sprintf("%v", fields["tag"]) + + return +} + +func (c *Client) GetServers() (servers []Server, err error) { + var serverMap map[string]Server + if err := c.get(`server/list`, &serverMap); err != nil { + return nil, err + } + + for _, server := range serverMap { + servers = append(servers, server) + } + return servers, nil +} + +func (c *Client) GetServersByTag(tag string) (servers []Server, err error) { + var serverMap map[string]Server + if err := c.get(`server/list?tag=`+tag, &serverMap); err != nil { + return nil, err + } + + for _, server := range serverMap { + servers = append(servers, server) + } + return servers, nil +} + +func (c *Client) GetServer(id string) (server Server, err error) { + if err := c.get(`server/list?SUBID=`+id, &server); err != nil { + return Server{}, err + } + return server, nil +} + +func (c *Client) CreateServer(name string, regionID, planID, osID int, options *ServerOptions) (Server, error) { + values := url.Values{ + "label": {name}, + "DCID": {fmt.Sprintf("%v", regionID)}, + "VPSPLANID": {fmt.Sprintf("%v", planID)}, + "OSID": {fmt.Sprintf("%v", osID)}, + } + + if options != nil { + if options.IPXEChainURL != "" { + values.Add("ipxe_chain_url", options.IPXEChainURL) + } + + if options.ISO != 0 { + values.Add("ISOID", fmt.Sprintf("%v", options.ISO)) + } + + if options.Script != 0 { + values.Add("SCRIPTID", fmt.Sprintf("%v", options.Script)) + } + + if options.UserData != "" { + values.Add("userdata", base64.StdEncoding.EncodeToString([]byte(options.UserData))) + } + + if options.Snapshot != "" { + values.Add("SNAPSHOTID", options.Snapshot) + } + + if options.SSHKey != "" { + values.Add("SSHKEYID", options.SSHKey) + } + + values.Add("enable_ipv6", "no") + if options.IPV6 { + values.Set("enable_ipv6", "yes") + } + + values.Add("enable_private_network", "no") + if options.PrivateNetworking { + values.Set("enable_private_network", "yes") + } + + values.Add("auto_backups", "no") + if options.AutoBackups { + values.Set("auto_backups", "yes") + } + + values.Add("notify_activate", "yes") + if options.DontNotifyOnActivate { + values.Set("notify_activate", "no") + } + } + + var server Server + if err := c.post(`server/create`, values, &server); err != nil { + return Server{}, err + } + server.Name = name + server.RegionID = regionID + server.PlanID = planID + + return server, nil +} + +func (c *Client) RenameServer(id, name string) error { + values := url.Values{ + "SUBID": {id}, + "label": {name}, + } + + if err := c.post(`server/label_set`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) StartServer(id string) error { + values := url.Values{ + "SUBID": {id}, + } + + if err := c.post(`server/start`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) HaltServer(id string) error { + values := url.Values{ + "SUBID": {id}, + } + + if err := c.post(`server/halt`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) RebootServer(id string) error { + values := url.Values{ + "SUBID": {id}, + } + + if err := c.post(`server/reboot`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) ReinstallServer(id string) error { + values := url.Values{ + "SUBID": {id}, + } + + if err := c.post(`server/reinstall`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) ChangeOSofServer(id string, osID int) error { + values := url.Values{ + 
"SUBID": {id}, + "OSID": {fmt.Sprintf("%v", osID)}, + } + + if err := c.post(`server/os_change`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) ListOSforServer(id string) (os []OS, err error) { + var osMap map[string]OS + if err := c.get(`server/os_change_list?SUBID=`+id, &osMap); err != nil { + return nil, err + } + + for _, o := range osMap { + os = append(os, o) + } + return os, nil +} + +func (c *Client) DeleteServer(id string) error { + values := url.Values{ + "SUBID": {id}, + } + + if err := c.post(`server/destroy`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) BandwidthOfServer(id string) (bandwidth []map[string]string, err error) { + var bandwidthMap map[string][][]string + if err := c.get(`server/bandwidth?SUBID=`+id, &bandwidthMap); err != nil { + return nil, err + } + + // parse incoming bytes + for _, b := range bandwidthMap["incoming_bytes"] { + bMap := make(map[string]string) + bMap["date"] = b[0] + bMap["incoming"] = b[1] + bandwidth = append(bandwidth, bMap) + } + + // parse outgoing bytes (we'll assume that incoming and outgoing dates are always a match) + for _, b := range bandwidthMap["outgoing_bytes"] { + for i := range bandwidth { + if bandwidth[i]["date"] == b[0] { + bandwidth[i]["outgoing"] = b[1] + break + } + } + } + + return bandwidth, nil +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/snapshots.go b/vendor/github.com/JamesClonk/vultr/lib/snapshots.go new file mode 100644 index 000000000000..7500397f07a4 --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/snapshots.go @@ -0,0 +1,50 @@ +package lib + +import "net/url" + +// Snapshot of a virtual machine on Vultr account +type Snapshot struct { + ID string `json:"SNAPSHOTID"` + Description string `json:"description"` + Size string `json:"size"` + Status string `json:"status"` + Created string `json:"date_created"` +} + +func (c *Client) GetSnapshots() (snapshots []Snapshot, err error) { + var snapshotMap map[string]Snapshot + if err := c.get(`snapshot/list`, &snapshotMap); err != nil { + return nil, err + } + + for _, snapshot := range snapshotMap { + snapshots = append(snapshots, snapshot) + } + return snapshots, nil +} + +func (c *Client) CreateSnapshot(id, description string) (Snapshot, error) { + values := url.Values{ + "SUBID": {id}, + "description": {description}, + } + + var snapshot Snapshot + if err := c.post(`snapshot/create`, values, &snapshot); err != nil { + return Snapshot{}, err + } + snapshot.Description = description + + return snapshot, nil +} + +func (c *Client) DeleteSnapshot(id string) error { + values := url.Values{ + "SNAPSHOTID": {id}, + } + + if err := c.post(`snapshot/destroy`, values, nil); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/JamesClonk/vultr/lib/sshkeys.go b/vendor/github.com/JamesClonk/vultr/lib/sshkeys.go new file mode 100644 index 000000000000..c178fe71d25f --- /dev/null +++ b/vendor/github.com/JamesClonk/vultr/lib/sshkeys.go @@ -0,0 +1,67 @@ +package lib + +import "net/url" + +// SSHKey on Vultr account +type SSHKey struct { + ID string `json:"SSHKEYID"` + Name string `json:"name"` + Key string `json:"ssh_key"` + Created string `json:"date_created"` +} + +func (c *Client) GetSSHKeys() (keys []SSHKey, err error) { + var keyMap map[string]SSHKey + if err := c.get(`sshkey/list`, &keyMap); err != nil { + return nil, err + } + + for _, key := range keyMap { + keys = append(keys, key) + } + return keys, nil +} + +func (c *Client) CreateSSHKey(name, key string) (SSHKey, 
error) { + values := url.Values{ + "name": {name}, + "ssh_key": {key}, + } + + var sshKey SSHKey + if err := c.post(`sshkey/create`, values, &sshKey); err != nil { + return SSHKey{}, err + } + sshKey.Name = name + sshKey.Key = key + + return sshKey, nil +} + +func (c *Client) UpdateSSHKey(key SSHKey) error { + values := url.Values{ + "SSHKEYID": {key.ID}, + } + if key.Name != "" { + values.Add("name", key.Name) + } + if key.Key != "" { + values.Add("ssh_key", key.Key) + } + + if err := c.post(`sshkey/update`, values, nil); err != nil { + return err + } + return nil +} + +func (c *Client) DeleteSSHKey(id string) error { + values := url.Values{ + "SSHKEYID": {id}, + } + + if err := c.post(`sshkey/destroy`, values, nil); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/juju/ratelimit/LICENSE b/vendor/github.com/juju/ratelimit/LICENSE new file mode 100644 index 000000000000..ade9307b390c --- /dev/null +++ b/vendor/github.com/juju/ratelimit/LICENSE @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. 
The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. 
+ + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/github.com/juju/ratelimit/README.md b/vendor/github.com/juju/ratelimit/README.md new file mode 100644 index 000000000000..a0fdfe2b128a --- /dev/null +++ b/vendor/github.com/juju/ratelimit/README.md @@ -0,0 +1,117 @@ +# ratelimit +-- + import "github.com/juju/ratelimit" + +The ratelimit package provides an efficient token bucket implementation. See +http://en.wikipedia.org/wiki/Token_bucket. + +## Usage + +#### func Reader + +```go +func Reader(r io.Reader, bucket *Bucket) io.Reader +``` +Reader returns a reader that is rate limited by the given token bucket. 
Each +token in the bucket represents one byte. + +#### func Writer + +```go +func Writer(w io.Writer, bucket *Bucket) io.Writer +``` +Writer returns a writer that is rate limited by the given token bucket. Each +token in the bucket represents one byte. + +#### type Bucket + +```go +type Bucket struct { +} +``` + +Bucket represents a token bucket that fills at a predetermined rate. Methods on +Bucket may be called concurrently. + +#### func NewBucket + +```go +func NewBucket(fillInterval time.Duration, capacity int64) *Bucket +``` +NewBucket returns a new token bucket that fills at the rate of one token every +fillInterval, up to the given maximum capacity. Both arguments must be positive. +The bucket is initially full. + +#### func NewBucketWithQuantum + +```go +func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket +``` +NewBucketWithQuantum is similar to NewBucket, but allows the specification of +the quantum size - quantum tokens are added every fillInterval. + +#### func NewBucketWithRate + +```go +func NewBucketWithRate(rate float64, capacity int64) *Bucket +``` +NewBucketWithRate returns a token bucket that fills the bucket at the rate of +rate tokens per second up to the given maximum capacity. Because of limited +clock resolution, at high rates, the actual rate may be up to 1% different from +the specified rate. + +#### func (*Bucket) Rate + +```go +func (tb *Bucket) Rate() float64 +``` +Rate returns the fill rate of the bucket, in tokens per second. + +#### func (*Bucket) Take + +```go +func (tb *Bucket) Take(count int64) time.Duration +``` +Take takes count tokens from the bucket without blocking. It returns the time +that the caller should wait until the tokens are actually available. + +Note that if the request is irrevocable - there is no way to return tokens to +the bucket once this method commits us to taking them. + +#### func (*Bucket) TakeAvailable + +```go +func (tb *Bucket) TakeAvailable(count int64) int64 +``` +TakeAvailable takes up to count immediately available tokens from the bucket. It +returns the number of tokens removed, or zero if there are no available tokens. +It does not block. + +#### func (*Bucket) TakeMaxDuration + +```go +func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) +``` +TakeMaxDuration is like Take, except that it will only take tokens from the +bucket if the wait time for the tokens is no greater than maxWait. + +If it would take longer than maxWait for the tokens to become available, it does +nothing and reports false, otherwise it returns the time that the caller should +wait until the tokens are actually available, and reports true. + +#### func (*Bucket) Wait + +```go +func (tb *Bucket) Wait(count int64) +``` +Wait takes count tokens from the bucket, waiting until they are available. + +#### func (*Bucket) WaitMaxDuration + +```go +func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool +``` +WaitMaxDuration is like Wait except that it will only take tokens from the +bucket if it needs to wait for no greater than maxWait. It reports whether any +tokens have been removed from the bucket If no tokens have been removed, it +returns immediately. diff --git a/vendor/github.com/juju/ratelimit/ratelimit.go b/vendor/github.com/juju/ratelimit/ratelimit.go new file mode 100644 index 000000000000..3ef32fbcc098 --- /dev/null +++ b/vendor/github.com/juju/ratelimit/ratelimit.go @@ -0,0 +1,245 @@ +// Copyright 2014 Canonical Ltd. 
+// Licensed under the LGPLv3 with static-linking exception. +// See LICENCE file for details. + +// The ratelimit package provides an efficient token bucket implementation +// that can be used to limit the rate of arbitrary things. +// See http://en.wikipedia.org/wiki/Token_bucket. +package ratelimit + +import ( + "math" + "strconv" + "sync" + "time" +) + +// Bucket represents a token bucket that fills at a predetermined rate. +// Methods on Bucket may be called concurrently. +type Bucket struct { + startTime time.Time + capacity int64 + quantum int64 + fillInterval time.Duration + + // The mutex guards the fields following it. + mu sync.Mutex + + // avail holds the number of available tokens + // in the bucket, as of availTick ticks from startTime. + // It will be negative when there are consumers + // waiting for tokens. + avail int64 + availTick int64 +} + +// NewBucket returns a new token bucket that fills at the +// rate of one token every fillInterval, up to the given +// maximum capacity. Both arguments must be +// positive. The bucket is initially full. +func NewBucket(fillInterval time.Duration, capacity int64) *Bucket { + return NewBucketWithQuantum(fillInterval, capacity, 1) +} + +// rateMargin specifes the allowed variance of actual +// rate from specified rate. 1% seems reasonable. +const rateMargin = 0.01 + +// NewBucketWithRate returns a token bucket that fills the bucket +// at the rate of rate tokens per second up to the given +// maximum capacity. Because of limited clock resolution, +// at high rates, the actual rate may be up to 1% different from the +// specified rate. +func NewBucketWithRate(rate float64, capacity int64) *Bucket { + for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) { + fillInterval := time.Duration(1e9 * float64(quantum) / rate) + if fillInterval <= 0 { + continue + } + tb := NewBucketWithQuantum(fillInterval, capacity, quantum) + if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin { + return tb + } + } + panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64)) +} + +// nextQuantum returns the next quantum to try after q. +// We grow the quantum exponentially, but slowly, so we +// get a good fit in the lower numbers. +func nextQuantum(q int64) int64 { + q1 := q * 11 / 10 + if q1 == q { + q1++ + } + return q1 +} + +// NewBucketWithQuantum is similar to NewBucket, but allows +// the specification of the quantum size - quantum tokens +// are added every fillInterval. +func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket { + if fillInterval <= 0 { + panic("token bucket fill interval is not > 0") + } + if capacity <= 0 { + panic("token bucket capacity is not > 0") + } + if quantum <= 0 { + panic("token bucket quantum is not > 0") + } + return &Bucket{ + startTime: time.Now(), + capacity: capacity, + quantum: quantum, + avail: capacity, + fillInterval: fillInterval, + } +} + +// Wait takes count tokens from the bucket, waiting until they are +// available. +func (tb *Bucket) Wait(count int64) { + if d := tb.Take(count); d > 0 { + time.Sleep(d) + } +} + +// WaitMaxDuration is like Wait except that it will +// only take tokens from the bucket if it needs to wait +// for no greater than maxWait. It reports whether +// any tokens have been removed from the bucket +// If no tokens have been removed, it returns immediately. 
+func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool { + d, ok := tb.TakeMaxDuration(count, maxWait) + if d > 0 { + time.Sleep(d) + } + return ok +} + +const infinityDuration time.Duration = 0x7fffffffffffffff + +// Take takes count tokens from the bucket without blocking. It returns +// the time that the caller should wait until the tokens are actually +// available. +// +// Note that if the request is irrevocable - there is no way to return +// tokens to the bucket once this method commits us to taking them. +func (tb *Bucket) Take(count int64) time.Duration { + d, _ := tb.take(time.Now(), count, infinityDuration) + return d +} + +// TakeMaxDuration is like Take, except that +// it will only take tokens from the bucket if the wait +// time for the tokens is no greater than maxWait. +// +// If it would take longer than maxWait for the tokens +// to become available, it does nothing and reports false, +// otherwise it returns the time that the caller should +// wait until the tokens are actually available, and reports +// true. +func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) { + return tb.take(time.Now(), count, maxWait) +} + +// TakeAvailable takes up to count immediately available tokens from the +// bucket. It returns the number of tokens removed, or zero if there are +// no available tokens. It does not block. +func (tb *Bucket) TakeAvailable(count int64) int64 { + return tb.takeAvailable(time.Now(), count) +} + +// takeAvailable is the internal version of TakeAvailable - it takes the +// current time as an argument to enable easy testing. +func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 { + if count <= 0 { + return 0 + } + tb.mu.Lock() + defer tb.mu.Unlock() + + tb.adjust(now) + if tb.avail <= 0 { + return 0 + } + if count > tb.avail { + count = tb.avail + } + tb.avail -= count + return count +} + +// Available returns the number of available tokens. It will be negative +// when there are consumers waiting for tokens. Note that if this +// returns greater than zero, it does not guarantee that calls that take +// tokens from the buffer will succeed, as the number of available +// tokens could have changed in the meantime. This method is intended +// primarily for metrics reporting and debugging. +func (tb *Bucket) Available() int64 { + return tb.available(time.Now()) +} + +// available is the internal version of available - it takes the current time as +// an argument to enable easy testing. +func (tb *Bucket) available(now time.Time) int64 { + tb.mu.Lock() + defer tb.mu.Unlock() + tb.adjust(now) + return tb.avail +} + +// Capacity returns the capacity that the bucket was created with. +func (tb *Bucket) Capacity() int64 { + return tb.capacity +} + +// Rate returns the fill rate of the bucket, in tokens per second. +func (tb *Bucket) Rate() float64 { + return 1e9 * float64(tb.quantum) / float64(tb.fillInterval) +} + +// take is the internal version of Take - it takes the current time as +// an argument to enable easy testing. +func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) { + if count <= 0 { + return 0, true + } + tb.mu.Lock() + defer tb.mu.Unlock() + + currentTick := tb.adjust(now) + avail := tb.avail - count + if avail >= 0 { + tb.avail = avail + return 0, true + } + // Round up the missing tokens to the nearest multiple + // of quantum - the tokens won't be available until + // that tick. 
+ endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum + endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval) + waitTime := endTime.Sub(now) + if waitTime > maxWait { + return 0, false + } + tb.avail = avail + return waitTime, true +} + +// adjust adjusts the current bucket capacity based on the current time. +// It returns the current tick. +func (tb *Bucket) adjust(now time.Time) (currentTick int64) { + currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval) + + if tb.avail >= tb.capacity { + return + } + tb.avail += (currentTick - tb.availTick) * tb.quantum + if tb.avail > tb.capacity { + tb.avail = tb.capacity + } + tb.availTick = currentTick + return +} diff --git a/vendor/github.com/juju/ratelimit/reader.go b/vendor/github.com/juju/ratelimit/reader.go new file mode 100644 index 000000000000..6403bf78d4a5 --- /dev/null +++ b/vendor/github.com/juju/ratelimit/reader.go @@ -0,0 +1,51 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3 with static-linking exception. +// See LICENCE file for details. + +package ratelimit + +import "io" + +type reader struct { + r io.Reader + bucket *Bucket +} + +// Reader returns a reader that is rate limited by +// the given token bucket. Each token in the bucket +// represents one byte. +func Reader(r io.Reader, bucket *Bucket) io.Reader { + return &reader{ + r: r, + bucket: bucket, + } +} + +func (r *reader) Read(buf []byte) (int, error) { + n, err := r.r.Read(buf) + if n <= 0 { + return n, err + } + r.bucket.Wait(int64(n)) + return n, err +} + +type writer struct { + w io.Writer + bucket *Bucket +} + +// Writer returns a reader that is rate limited by +// the given token bucket. Each token in the bucket +// represents one byte. +func Writer(w io.Writer, bucket *Bucket) io.Writer { + return &writer{ + w: w, + bucket: bucket, + } +} + +func (w *writer) Write(buf []byte) (int, error) { + w.bucket.Wait(int64(len(buf))) + return w.w.Write(buf) +} diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS new file mode 100644 index 000000000000..1965683525ad --- /dev/null +++ b/vendor/github.com/miekg/dns/AUTHORS @@ -0,0 +1 @@ +Miek Gieben diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS new file mode 100644 index 000000000000..f77e8a895f91 --- /dev/null +++ b/vendor/github.com/miekg/dns/CONTRIBUTORS @@ -0,0 +1,9 @@ +Alex A. Skinner +Andrew Tunnell-Jones +Ask Bjørn Hansen +Dave Cheney +Dusty Wilson +Marek Majkowski +Peter van Dijk +Omri Bahumi +Alex Sergeyev diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT new file mode 100644 index 000000000000..35702b10e87e --- /dev/null +++ b/vendor/github.com/miekg/dns/COPYRIGHT @@ -0,0 +1,9 @@ +Copyright 2009 The Go Authors. All rights reserved. Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. +Extensions of the original work are copyright (c) 2011 Miek Gieben + +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. + +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. 
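Before moving on to the miekg/dns files, a short orientation for the juju/ratelimit package vendored above: the README and ratelimit.go document a token bucket (`NewBucket`, `NewBucketWithRate`) plus `Reader`/`Writer` wrappers in which one token represents one byte. The following is a minimal usage sketch, not code from this patch; the 1 MB/s rate, the 256 KB burst capacity, and the use of os.Stdin/os.Stdout are illustrative assumptions.

```go
package main

import (
	"io"
	"os"

	"github.com/juju/ratelimit"
)

func main() {
	// Fill at roughly 1 MB/s with a burst capacity of 256 KB.
	// Both numbers are illustrative; see NewBucketWithRate above.
	bucket := ratelimit.NewBucketWithRate(1024*1024, 256*1024)

	// Wrap stdin so reads are throttled; each token represents one byte.
	limited := ratelimit.Reader(os.Stdin, bucket)

	// Copy through the limited reader; the copy blocks inside
	// Bucket.Wait whenever the bucket runs out of tokens.
	if _, err := io.Copy(os.Stdout, limited); err != nil {
		os.Exit(1)
	}
}
```

The same bucket can be shared between a `Reader` and a `Writer` if reads and writes should draw from a single rate budget.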
diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE new file mode 100644 index 000000000000..5763fa7fe5d9 --- /dev/null +++ b/vendor/github.com/miekg/dns/LICENSE @@ -0,0 +1,32 @@ +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md new file mode 100644 index 000000000000..83b4183eb80d --- /dev/null +++ b/vendor/github.com/miekg/dns/README.md @@ -0,0 +1,151 @@ +[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) + +# Alternative (more granular) approach to a DNS library + +> Less is more. + +Complete and usable DNS library. All widely used Resource Records are +supported, including the DNSSEC types. It follows a lean and mean philosophy. +If there is stuff you should know as a DNS programmer there isn't a convenience +function for it. Server side and client side programming is supported, i.e. you +can build servers and resolvers with it. + +We try to keep the "master" branch as sane as possible and at the bleeding edge +of standards, avoiding breaking changes wherever reasonable. We support the last +two versions of Go, currently: 1.5 and 1.6. + +# Goals + +* KISS; +* Fast; +* Small API, if its easy to code in Go, don't make a function for it. 
+ +# Users + +A not-so-up-to-date-list-that-may-be-actually-current: + +* https://cloudflare.com +* https://github.com/abh/geodns +* http://www.statdns.com/ +* http://www.dnsinspect.com/ +* https://github.com/chuangbo/jianbing-dictionary-dns +* http://www.dns-lg.com/ +* https://github.com/fcambus/rrda +* https://github.com/kenshinx/godns +* https://github.com/skynetservices/skydns +* https://github.com/hashicorp/consul +* https://github.com/DevelopersPL/godnsagent +* https://github.com/duedil-ltd/discodns +* https://github.com/StalkR/dns-reverse-proxy +* https://github.com/tianon/rawdns +* https://mesosphere.github.io/mesos-dns/ +* https://pulse.turbobytes.com/ +* https://play.google.com/store/apps/details?id=com.turbobytes.dig +* https://github.com/fcambus/statzone +* https://github.com/benschw/dns-clb-go +* https://github.com/corny/dnscheck for http://public-dns.info/ +* https://namesmith.io +* https://github.com/miekg/unbound +* https://github.com/miekg/exdns +* https://dnslookup.org +* https://github.com/looterz/grimd +* https://github.com/phamhongviet/serf-dns + +Send pull request if you want to be listed here. + +# Features + +* UDP/TCP queries, IPv4 and IPv6; +* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported; +* Fast: + * Reply speed around ~ 80K qps (faster hardware results in more qps); + * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds; +* Server side programming (mimicking the net/http package); +* Client side programming; +* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA; +* EDNS0, NSID, Cookies; +* AXFR/IXFR; +* TSIG, SIG(0); +* DNS over TLS: optional encrypted connection between client and server; +* DNS name compression; +* Depends only on the standard library. + +Have fun! + +Miek Gieben - 2010-2012 - + +# Building + +Building is done with the `go` tool. If you have setup your GOPATH +correctly, the following should work: + + go get github.com/miekg/dns + go build github.com/miekg/dns + +## Examples + +A short "how to use the API" is at the beginning of doc.go (this also will show +when you call `godoc github.com/miekg/dns`). + +Example programs can be found in the `github.com/miekg/exdns` repository. 
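To complement the pointer to doc.go and the exdns examples, here is a minimal query sketch using the client API added later in this patch (`Msg.SetQuestion`, `Fqdn`, and `Client.Exchange`, the same pattern shown in the Exchange documentation in client.go). It is an illustration only: the resolver address 8.8.8.8:53 and the queried name example.com are placeholders, not values used anywhere in the patch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Build an A query for example.com (placeholder name); SetQuestion
	// fills the question section, assigns an Id, and sets the RD bit.
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	// A plain UDP client; Net could be set to "tcp" or "tcp-tls" instead.
	c := new(dns.Client)

	// 8.8.8.8:53 is a placeholder resolver address.
	in, rtt, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip:", rtt)
	for _, rr := range in.Answer {
		fmt.Println(rr.String())
	}
}
```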
+ +## Supported RFCs + +*all of them* + +* 103{4,5} - DNS standard +* 1348 - NSAP record (removed the record) +* 1982 - Serial Arithmetic +* 1876 - LOC record +* 1995 - IXFR +* 1996 - DNS notify +* 2136 - DNS Update (dynamic updates) +* 2181 - RRset definition - there is no RRset type though, just []RR +* 2537 - RSAMD5 DNS keys +* 2065 - DNSSEC (updated in later RFCs) +* 2671 - EDNS record +* 2782 - SRV record +* 2845 - TSIG record +* 2915 - NAPTR record +* 2929 - DNS IANA Considerations +* 3110 - RSASHA1 DNS keys +* 3225 - DO bit (DNSSEC OK) +* 340{1,2,3} - NAPTR record +* 3445 - Limiting the scope of (DNS)KEY +* 3597 - Unknown RRs +* 403{3,4,5} - DNSSEC + validation functions +* 4255 - SSHFP record +* 4343 - Case insensitivity +* 4408 - SPF record +* 4509 - SHA256 Hash in DS +* 4592 - Wildcards in the DNS +* 4635 - HMAC SHA TSIG +* 4701 - DHCID +* 4892 - id.server +* 5001 - NSID +* 5155 - NSEC3 record +* 5205 - HIP record +* 5702 - SHA2 in the DNS +* 5936 - AXFR +* 5966 - TCP implementation recommendations +* 6605 - ECDSA +* 6725 - IANA Registry Update +* 6742 - ILNP DNS +* 6840 - Clarifications and Implementation Notes for DNS Security +* 6844 - CAA record +* 6891 - EDNS0 update +* 6895 - DNS IANA considerations +* 6975 - Algorithm Understanding in DNSSEC +* 7043 - EUI48/EUI64 records +* 7314 - DNS (EDNS) EXPIRE Option +* 7553 - URI record +* 7858 - DNS over TLS: Initiation and Performance Considerations (draft) +* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies) +* xxxx - EDNS0 DNS Update Lease (draft) + +## Loosely based upon + +* `ldns` +* `NSD` +* `Net::DNS` +* `GRONG` diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go new file mode 100644 index 000000000000..1302e4e04c6d --- /dev/null +++ b/vendor/github.com/miekg/dns/client.go @@ -0,0 +1,455 @@ +package dns + +// A client implementation. + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "io" + "net" + "time" +) + +const dnsTimeout time.Duration = 2 * time.Second +const tcpIdleTimeout time.Duration = 8 * time.Second + +// A Conn represents a connection to a DNS server. +type Conn struct { + net.Conn // a net.Conn holding the connection + UDPSize uint16 // minimum receive buffer for UDP messages + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified + rtt time.Duration + t time.Time + tsigRequestMAC string +} + +// A Client defines parameters for a DNS client. 
+type Client struct { + Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) + UDPSize uint16 // minimum receive buffer for UDP messages + TLSConfig *tls.Config // TLS connection configuration + Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified + SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass + group singleflight +} + +// Exchange performs a synchronous UDP query. It sends the message m to the address +// contained in a and waits for an reply. Exchange does not retry a failed query, nor +// will it fall back to TCP in case of truncation. +// See client.Exchange for more information on setting larger buffer sizes. +func Exchange(m *Msg, a string) (r *Msg, err error) { + var co *Conn + co, err = DialTimeout("udp", a, dnsTimeout) + if err != nil { + return nil, err + } + + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + + co.SetWriteDeadline(time.Now().Add(dnsTimeout)) + if err = co.WriteMsg(m); err != nil { + return nil, err + } + + co.SetReadDeadline(time.Now().Add(dnsTimeout)) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// ExchangeConn performs a synchronous query. It sends the message m via the connection +// c and waits for a reply. The connection c is not closed by ExchangeConn. +// This function is going away, but can easily be mimicked: +// +// co := &dns.Conn{Conn: c} // c is your net.Conn +// co.WriteMsg(m) +// in, _ := co.ReadMsg() +// co.Close() +// +func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { + println("dns: this function is deprecated") + co := new(Conn) + co.Conn = c + if err = co.WriteMsg(m); err != nil { + return nil, err + } + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// Exchange performs an synchronous query. It sends the message m to the address +// contained in a and waits for an reply. Basic use pattern with a *dns.Client: +// +// c := new(dns.Client) +// in, rtt, err := c.Exchange(message, "127.0.0.1:53") +// +// Exchange does not retry a failed query, nor will it fall back to TCP in +// case of truncation. +// It is up to the caller to create a message that allows for larger responses to be +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger +// buffer, see SetEdns0. Messsages without an OPT RR will fallback to the historic limit +// of 512 bytes. +func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + if !c.SingleInflight { + return c.exchange(m, a) + } + // This adds a bunch of garbage, TODO(miek). 
+ t := "nop" + if t1, ok := TypeToString[m.Question[0].Qtype]; ok { + t = t1 + } + cl := "nop" + if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { + cl = cl1 + } + r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { + return c.exchange(m, a) + }) + if err != nil { + return r, rtt, err + } + if shared { + return r.Copy(), rtt, nil + } + return r, rtt, nil +} + +func (c *Client) dialTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + if c.DialTimeout != 0 { + return c.DialTimeout + } + return dnsTimeout +} + +func (c *Client) readTimeout() time.Duration { + if c.ReadTimeout != 0 { + return c.ReadTimeout + } + return dnsTimeout +} + +func (c *Client) writeTimeout() time.Duration { + if c.WriteTimeout != 0 { + return c.WriteTimeout + } + return dnsTimeout +} + +func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var co *Conn + network := "udp" + tls := false + + switch c.Net { + case "tcp-tls": + network = "tcp" + tls = true + case "tcp4-tls": + network = "tcp4" + tls = true + case "tcp6-tls": + network = "tcp6" + tls = true + default: + if c.Net != "" { + network = c.Net + } + } + + var deadline time.Time + if c.Timeout != 0 { + deadline = time.Now().Add(c.Timeout) + } + + if tls { + co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout()) + } else { + co, err = DialTimeout(network, a, c.dialTimeout()) + } + + if err != nil { + return nil, 0, err + } + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + // Otherwise use the client's configured UDP size. + if opt == nil && c.UDPSize >= MinMsgSize { + co.UDPSize = c.UDPSize + } + + co.TsigSecret = c.TsigSecret + co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout())) + if err = co.WriteMsg(m); err != nil { + return nil, 0, err + } + + co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout())) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, co.rtt, err +} + +// ReadMsg reads a message from the connection co. +// If the received message contains a TSIG record the transaction +// signature is verified. +func (co *Conn) ReadMsg() (*Msg, error) { + p, err := co.ReadMsgHeader(nil) + if err != nil { + return nil, err + } + + m := new(Msg) + if err := m.Unpack(p); err != nil { + // If ErrTruncated was returned, we still want to allow the user to use + // the message, but naively they can just check err if they don't want + // to use a truncated message + if err == ErrTruncated { + return m, err + } + return nil, err + } + if t := m.IsTsig(); t != nil { + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + } + return m, err +} + +// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). +// Returns message as a byte slice to be parsed with Msg.Unpack later on. +// Note that error handling on the message body is not possible as only the header is parsed. +func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { + var ( + p []byte + n int + err error + ) + + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + // First two bytes specify the length of the entire message. 
+ l, err := tcpMsgLen(r) + if err != nil { + return nil, err + } + p = make([]byte, l) + n, err = tcpRead(r, p) + co.rtt = time.Since(co.t) + default: + if co.UDPSize > MinMsgSize { + p = make([]byte, co.UDPSize) + } else { + p = make([]byte, MinMsgSize) + } + n, err = co.Read(p) + co.rtt = time.Since(co.t) + } + + if err != nil { + return nil, err + } else if n < headerSize { + return nil, ErrShortRead + } + + p = p[:n] + if hdr != nil { + dh, _, err := unpackMsgHdr(p, 0) + if err != nil { + return nil, err + } + *hdr = dh + } + return p, err +} + +// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length. +func tcpMsgLen(t io.Reader) (int, error) { + p := []byte{0, 0} + n, err := t.Read(p) + if err != nil { + return 0, err + } + if n != 2 { + return 0, ErrShortRead + } + l := binary.BigEndian.Uint16(p) + if l == 0 { + return 0, ErrShortRead + } + return int(l), nil +} + +// tcpRead calls TCPConn.Read enough times to fill allocated buffer. +func tcpRead(t io.Reader, p []byte) (int, error) { + n, err := t.Read(p) + if err != nil { + return n, err + } + for n < len(p) { + j, err := t.Read(p[n:]) + if err != nil { + return n, err + } + n += j + } + return n, err +} + +// Read implements the net.Conn read method. +func (co *Conn) Read(p []byte) (n int, err error) { + if co.Conn == nil { + return 0, ErrConnEmpty + } + if len(p) < 2 { + return 0, io.ErrShortBuffer + } + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + l, err := tcpMsgLen(r) + if err != nil { + return 0, err + } + if l > len(p) { + return int(l), io.ErrShortBuffer + } + return tcpRead(r, p[:l]) + } + // UDP connection + n, err = co.Conn.Read(p) + if err != nil { + return n, err + } + return n, err +} + +// WriteMsg sends a message through the connection co. +// If the message m contains a TSIG record the transaction +// signature is calculated. +func (co *Conn) WriteMsg(m *Msg) (err error) { + var out []byte + if t := m.IsTsig(); t != nil { + mac := "" + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return ErrSecret + } + out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + // Set for the next read, although only used in zone transfers + co.tsigRequestMAC = mac + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + co.t = time.Now() + if _, err = co.Write(out); err != nil { + return err + } + return nil +} + +// Write implements the net.Conn Write method. +func (co *Conn) Write(p []byte) (n int, err error) { + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + w := t.(io.Writer) + + lp := len(p) + if lp < 2 { + return 0, io.ErrShortBuffer + } + if lp > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, lp+2) + binary.BigEndian.PutUint16(l, uint16(lp)) + p = append(l, p...) + n, err := io.Copy(w, bytes.NewReader(p)) + return int(n), err + } + n, err = co.Conn.(*net.UDPConn).Write(p) + return n, err +} + +// Dial connects to the address on the named network. +func Dial(network, address string) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.Dial(network, address) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialTimeout acts like Dial but takes a timeout. 
+func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.DialTimeout(network, address, timeout) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialWithTLS connects to the address on the named network with TLS. +func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = tls.Dial(network, address, tlsConfig) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. +func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { + var dialer net.Dialer + dialer.Timeout = timeout + + conn = new(Conn) + conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig) + if err != nil { + return nil, err + } + return conn, nil +} + +func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time { + if deadline.IsZero() { + return time.Now().Add(timeout) + } + return deadline +} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go new file mode 100644 index 000000000000..cfa9ad0b228e --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -0,0 +1,99 @@ +package dns + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +// ClientConfig wraps the contents of the /etc/resolv.conf file. +type ClientConfig struct { + Servers []string // servers to use + Search []string // suffixes to append to local name + Port string // what port to use + Ndots int // number of dots in name to trigger absolute lookup + Timeout int // seconds before giving up on packet + Attempts int // lost packets before giving up on server, not used in the package dns +} + +// ClientConfigFromFile parses a resolv.conf(5) like file and returns +// a *ClientConfig. +func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { + file, err := os.Open(resolvconf) + if err != nil { + return nil, err + } + defer file.Close() + c := new(ClientConfig) + scanner := bufio.NewScanner(file) + c.Servers = make([]string, 0) + c.Search = make([]string, 0) + c.Port = "53" + c.Ndots = 1 + c.Timeout = 5 + c.Attempts = 2 + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, err + } + line := scanner.Text() + f := strings.Fields(line) + if len(f) < 1 { + continue + } + switch f[0] { + case "nameserver": // add one name server + if len(f) > 1 { + // One more check: make sure server name is + // just an IP address. Otherwise we need DNS + // to look it up. 
+ name := f[1] + c.Servers = append(c.Servers, name) + } + + case "domain": // set search path to just this domain + if len(f) > 1 { + c.Search = make([]string, 1) + c.Search[0] = f[1] + } else { + c.Search = make([]string, 0) + } + + case "search": // set search path to given servers + c.Search = make([]string, len(f)-1) + for i := 0; i < len(c.Search); i++ { + c.Search[i] = f[i+1] + } + + case "options": // magic options + for i := 1; i < len(f); i++ { + s := f[i] + switch { + case len(s) >= 6 && s[:6] == "ndots:": + n, _ := strconv.Atoi(s[6:]) + if n < 1 { + n = 1 + } + c.Ndots = n + case len(s) >= 8 && s[:8] == "timeout:": + n, _ := strconv.Atoi(s[8:]) + if n < 1 { + n = 1 + } + c.Timeout = n + case len(s) >= 8 && s[:9] == "attempts:": + n, _ := strconv.Atoi(s[9:]) + if n < 1 { + n = 1 + } + c.Attempts = n + case s == "rotate": + /* not imp */ + } + } + } + } + return c, nil +} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go new file mode 100644 index 000000000000..cf456165f435 --- /dev/null +++ b/vendor/github.com/miekg/dns/defaults.go @@ -0,0 +1,282 @@ +package dns + +import ( + "errors" + "net" + "strconv" +) + +const hexDigit = "0123456789abcdef" + +// Everything is assumed in ClassINET. + +// SetReply creates a reply message from a request message. +func (dns *Msg) SetReply(request *Msg) *Msg { + dns.Id = request.Id + dns.RecursionDesired = request.RecursionDesired // Copy rd bit + dns.Response = true + dns.Opcode = OpcodeQuery + dns.Rcode = RcodeSuccess + if len(request.Question) > 0 { + dns.Question = make([]Question, 1) + dns.Question[0] = request.Question[0] + } + return dns +} + +// SetQuestion creates a question message, it sets the Question +// section, generates an Id and sets the RecursionDesired (RD) +// bit to true. +func (dns *Msg) SetQuestion(z string, t uint16) *Msg { + dns.Id = Id() + dns.RecursionDesired = true + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, t, ClassINET} + return dns +} + +// SetNotify creates a notify message, it sets the Question +// section, generates an Id and sets the Authoritative (AA) +// bit to true. +func (dns *Msg) SetNotify(z string) *Msg { + dns.Opcode = OpcodeNotify + dns.Authoritative = true + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetRcode creates an error message suitable for the request. +func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { + dns.SetReply(request) + dns.Rcode = rcode + return dns +} + +// SetRcodeFormatError creates a message with FormError set. +func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { + dns.Rcode = RcodeFormatError + dns.Opcode = OpcodeQuery + dns.Response = true + dns.Authoritative = false + dns.Id = request.Id + return dns +} + +// SetUpdate makes the message a dynamic update message. It +// sets the ZONE section to: z, TypeSOA, ClassINET. +func (dns *Msg) SetUpdate(z string) *Msg { + dns.Id = Id() + dns.Response = false + dns.Opcode = OpcodeUpdate + dns.Compress = false // BIND9 cannot handle compression + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetIxfr creates message for requesting an IXFR. 
+func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Ns = make([]RR, 1) + s := new(SOA) + s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} + s.Serial = serial + s.Ns = ns + s.Mbox = mbox + dns.Question[0] = Question{z, TypeIXFR, ClassINET} + dns.Ns[0] = s + return dns +} + +// SetAxfr creates message for requesting an AXFR. +func (dns *Msg) SetAxfr(z string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeAXFR, ClassINET} + return dns +} + +// SetTsig appends a TSIG RR to the message. +// This is only a skeleton TSIG RR that is added as the last RR in the +// additional section. The Tsig is calculated when the message is being send. +func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg { + t := new(TSIG) + t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} + t.Algorithm = algo + t.Fudge = 300 + t.TimeSigned = uint64(timesigned) + t.OrigId = dns.Id + dns.Extra = append(dns.Extra, t) + return dns +} + +// SetEdns0 appends a EDNS0 OPT RR to the message. +// TSIG should always the last RR in a message. +func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { + e := new(OPT) + e.Hdr.Name = "." + e.Hdr.Rrtype = TypeOPT + e.SetUDPSize(udpsize) + if do { + e.SetDo() + } + dns.Extra = append(dns.Extra, e) + return dns +} + +// IsTsig checks if the message has a TSIG record as the last record +// in the additional section. It returns the TSIG record found or nil. +func (dns *Msg) IsTsig() *TSIG { + if len(dns.Extra) > 0 { + if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { + return dns.Extra[len(dns.Extra)-1].(*TSIG) + } + } + return nil +} + +// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 +// record in the additional section will do. It returns the OPT record +// found or nil. +func (dns *Msg) IsEdns0() *OPT { + // EDNS0 is at the end of the additional section, start there. + // We might want to change this to *only* look at the last two + // records. So we see TSIG and/or OPT - this a slightly bigger + // change though. + for i := len(dns.Extra) - 1; i >= 0; i-- { + if dns.Extra[i].Header().Rrtype == TypeOPT { + return dns.Extra[i].(*OPT) + } + } + return nil +} + +// IsDomainName checks if s is a valid domain name, it returns the number of +// labels and true, when a domain name is valid. Note that non fully qualified +// domain name is considered valid, in this case the last label is counted in +// the number of labels. When false is returned the number of labels is not +// defined. Also note that this function is extremely liberal; almost any +// string is a valid domain name as the DNS is 8 bit protocol. It checks if each +// label fits in 63 characters, but there is no length check for the entire +// string s. I.e. a domain name longer than 255 characters is considered valid. +func IsDomainName(s string) (labels int, ok bool) { + _, labels, err := packDomainName(s, nil, 0, nil, false) + return labels, err == nil +} + +// IsSubDomain checks if child is indeed a child of the parent. If child and parent +// are the same domain true is returned as well. +func IsSubDomain(parent, child string) bool { + // Entire child is contained in parent + return CompareDomainName(parent, child) == CountLabel(parent) +} + +// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. +// The checking is performed on the binary payload. 
+func IsMsg(buf []byte) error { + // Header + if len(buf) < 12 { + return errors.New("dns: bad message header") + } + // Header: Opcode + // TODO(miek): more checks here, e.g. check all header bits. + return nil +} + +// IsFqdn checks if a domain name is fully qualified. +func IsFqdn(s string) bool { + l := len(s) + if l == 0 { + return false + } + return s[l-1] == '.' +} + +// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. +// This means the RRs need to have the same type, name, and class. Returns true +// if the RR set is valid, otherwise false. +func IsRRset(rrset []RR) bool { + if len(rrset) == 0 { + return false + } + if len(rrset) == 1 { + return true + } + rrHeader := rrset[0].Header() + rrType := rrHeader.Rrtype + rrClass := rrHeader.Class + rrName := rrHeader.Name + + for _, rr := range rrset[1:] { + curRRHeader := rr.Header() + if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { + // Mismatch between the records, so this is not a valid rrset for + //signing/verifying + return false + } + } + + return true +} + +// Fqdn return the fully qualified domain name from s. +// If s is already fully qualified, it behaves as the identity function. +func Fqdn(s string) string { + if IsFqdn(s) { + return s + } + return s + "." +} + +// Copied from the official Go code. + +// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP +// address suitable for reverse DNS (PTR) record lookups or an error if it fails +// to parse the IP address. +func ReverseAddr(addr string) (arpa string, err error) { + ip := net.ParseIP(addr) + if ip == nil { + return "", &Error{err: "unrecognized address: " + addr} + } + if ip.To4() != nil { + return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + + strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil + } + // Must be IPv6 + buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) + // Add it, in reverse, to the buffer + for i := len(ip) - 1; i >= 0; i-- { + v := ip[i] + buf = append(buf, hexDigit[v&0xF]) + buf = append(buf, '.') + buf = append(buf, hexDigit[v>>4]) + buf = append(buf, '.') + } + // Append "ip6.arpa." and return (buf already has the final .) + buf = append(buf, "ip6.arpa."...) + return string(buf), nil +} + +// String returns the string representation for the type t. +func (t Type) String() string { + if t1, ok := TypeToString[uint16(t)]; ok { + return t1 + } + return "TYPE" + strconv.Itoa(int(t)) +} + +// String returns the string representation for the class c. +func (c Class) String() string { + if c1, ok := ClassToString[uint16(c)]; ok { + return c1 + } + return "CLASS" + strconv.Itoa(int(c)) +} + +// String returns the string representation for the name n. +func (n Name) String() string { + return sprintName(string(n)) +} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go new file mode 100644 index 000000000000..b3292287ce75 --- /dev/null +++ b/vendor/github.com/miekg/dns/dns.go @@ -0,0 +1,104 @@ +package dns + +import "strconv" + +const ( + year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. + defaultTtl = 3600 // Default internal TTL. + + DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes. + MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet. + MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet. +) + +// Error represents a DNS error. 
+type Error struct{ err string } + +func (e *Error) Error() string { + if e == nil { + return "dns: " + } + return "dns: " + e.err +} + +// An RR represents a resource record. +type RR interface { + // Header returns the header of an resource record. The header contains + // everything up to the rdata. + Header() *RR_Header + // String returns the text representation of the resource record. + String() string + + // copy returns a copy of the RR + copy() RR + // len returns the length (in octets) of the uncompressed RR in wire format. + len() int + // pack packs an RR into wire format. + pack([]byte, int, map[string]int, bool) (int, error) +} + +// RR_Header is the header all DNS resource records share. +type RR_Header struct { + Name string `dns:"cdomain-name"` + Rrtype uint16 + Class uint16 + Ttl uint32 + Rdlength uint16 // Length of data after header. +} + +// Header returns itself. This is here to make RR_Header implements the RR interface. +func (h *RR_Header) Header() *RR_Header { return h } + +// Just to implement the RR interface. +func (h *RR_Header) copy() RR { return nil } + +func (h *RR_Header) copyHeader() *RR_Header { + r := new(RR_Header) + r.Name = h.Name + r.Rrtype = h.Rrtype + r.Class = h.Class + r.Ttl = h.Ttl + r.Rdlength = h.Rdlength + return r +} + +func (h *RR_Header) String() string { + var s string + + if h.Rrtype == TypeOPT { + s = ";" + // and maybe other things + } + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += Class(h.Class).String() + "\t" + s += Type(h.Rrtype).String() + "\t" + return s +} + +func (h *RR_Header) len() int { + l := len(h.Name) + 1 + l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) + return l +} + +// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. +func (rr *RFC3597) ToRFC3597(r RR) error { + buf := make([]byte, r.len()*2) + off, err := PackRR(r, buf, 0, nil, false) + if err != nil { + return err + } + buf = buf[:off] + if int(r.Header().Rdlength) > off { + return ErrBuf + } + + rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) + if err != nil { + return err + } + *rr = *rfc3597.(*RFC3597) + return nil +} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go new file mode 100644 index 000000000000..f5f3fbdd899f --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -0,0 +1,721 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + _ "crypto/md5" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "math/big" + "sort" + "strings" + "time" +) + +// DNSSEC encryption algorithm codes. +const ( + _ uint8 = iota + RSAMD5 + DH + DSA + _ // Skip 4, RFC 6725, section 2.1 + RSASHA1 + DSANSEC3SHA1 + RSASHA1NSEC3SHA1 + RSASHA256 + _ // Skip 9, RFC 6725, section 2.1 + RSASHA512 + _ // Skip 11, RFC 6725, section 2.1 + ECCGOST + ECDSAP256SHA256 + ECDSAP384SHA384 + INDIRECT uint8 = 252 + PRIVATEDNS uint8 = 253 // Private (experimental keys) + PRIVATEOID uint8 = 254 +) + +// Map for algorithm names. 
+var AlgorithmToString = map[uint8]string{ + RSAMD5: "RSAMD5", + DH: "DH", + DSA: "DSA", + RSASHA1: "RSASHA1", + DSANSEC3SHA1: "DSA-NSEC3-SHA1", + RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", + RSASHA256: "RSASHA256", + RSASHA512: "RSASHA512", + ECCGOST: "ECC-GOST", + ECDSAP256SHA256: "ECDSAP256SHA256", + ECDSAP384SHA384: "ECDSAP384SHA384", + INDIRECT: "INDIRECT", + PRIVATEDNS: "PRIVATEDNS", + PRIVATEOID: "PRIVATEOID", +} + +// Map of algorithm strings. +var StringToAlgorithm = reverseInt8(AlgorithmToString) + +// Map of algorithm crypto hashes. +var AlgorithmToHash = map[uint8]crypto.Hash{ + RSAMD5: crypto.MD5, // Deprecated in RFC 6725 + RSASHA1: crypto.SHA1, + RSASHA1NSEC3SHA1: crypto.SHA1, + RSASHA256: crypto.SHA256, + ECDSAP256SHA256: crypto.SHA256, + ECDSAP384SHA384: crypto.SHA384, + RSASHA512: crypto.SHA512, +} + +// DNSSEC hashing algorithm codes. +const ( + _ uint8 = iota + SHA1 // RFC 4034 + SHA256 // RFC 4509 + GOST94 // RFC 5933 + SHA384 // Experimental + SHA512 // Experimental +) + +// Map for hash names. +var HashToString = map[uint8]string{ + SHA1: "SHA1", + SHA256: "SHA256", + GOST94: "GOST94", + SHA384: "SHA384", + SHA512: "SHA512", +} + +// Map of hash strings. +var StringToHash = reverseInt8(HashToString) + +// DNSKEY flag values. +const ( + SEP = 1 + REVOKE = 1 << 7 + ZONE = 1 << 8 +) + +// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. +type rrsigWireFmt struct { + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + /* No Signature */ +} + +// Used for converting DNSKEY's rdata to wirefmt. +type dnskeyWireFmt struct { + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` + /* Nothing is left out */ +} + +func divRoundUp(a, b int) int { + return (a + b - 1) / b +} + +// KeyTag calculates the keytag (or key-id) of the DNSKEY. +func (k *DNSKEY) KeyTag() uint16 { + if k == nil { + return 0 + } + var keytag int + switch k.Algorithm { + case RSAMD5: + // Look at the bottom two bytes of the modules, which the last + // item in the pubkey. We could do this faster by looking directly + // at the base64 values. But I'm lazy. + modulus, _ := fromBase64([]byte(k.PublicKey)) + if len(modulus) > 1 { + x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) + keytag = int(x) + } + default: + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return 0 + } + wire = wire[:n] + for i, v := range wire { + if i&1 != 0 { + keytag += int(v) // must be larger than uint32 + } else { + keytag += int(v) << 8 + } + } + keytag += (keytag >> 16) & 0xFFFF + keytag &= 0xFFFF + } + return uint16(keytag) +} + +// ToDS converts a DNSKEY record to a DS record. 
+func (k *DNSKEY) ToDS(h uint8) *DS { + if k == nil { + return nil + } + ds := new(DS) + ds.Hdr.Name = k.Hdr.Name + ds.Hdr.Class = k.Hdr.Class + ds.Hdr.Rrtype = TypeDS + ds.Hdr.Ttl = k.Hdr.Ttl + ds.Algorithm = k.Algorithm + ds.DigestType = h + ds.KeyTag = k.KeyTag() + + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return nil + } + wire = wire[:n] + + owner := make([]byte, 255) + off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) + if err1 != nil { + return nil + } + owner = owner[:off] + // RFC4034: + // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); + // "|" denotes concatenation + // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. + + // digest buffer + digest := append(owner, wire...) // another copy + + var hash crypto.Hash + switch h { + case SHA1: + hash = crypto.SHA1 + case SHA256: + hash = crypto.SHA256 + case SHA384: + hash = crypto.SHA384 + case SHA512: + hash = crypto.SHA512 + default: + return nil + } + + s := hash.New() + s.Write(digest) + ds.Digest = hex.EncodeToString(s.Sum(nil)) + return ds +} + +// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. +func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { + c := &CDNSKEY{DNSKEY: *k} + c.Hdr = *k.Hdr.copyHeader() + c.Hdr.Rrtype = TypeCDNSKEY + return c +} + +// ToCDS converts a DS record to a CDS record. +func (d *DS) ToCDS() *CDS { + c := &CDS{DS: *d} + c.Hdr = *d.Hdr.copyHeader() + c.Hdr.Rrtype = TypeCDS + return c +} + +// Sign signs an RRSet. The signature needs to be filled in with the values: +// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied +// from the RRset. Sign returns a non-nill error when the signing went OK. +// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non +// zero, it is used as-is, otherwise the TTL of the RRset is used as the +// OrigTTL. +func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + rr.Hdr.Rrtype = TypeRRSIG + rr.Hdr.Name = rrset[0].Header().Name + rr.Hdr.Class = rrset[0].Header().Class + if rr.OrigTtl == 0 { // If set don't override + rr.OrigTtl = rrset[0].Header().Ttl + } + rr.TypeCovered = rrset[0].Header().Rrtype + rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) + + if strings.HasPrefix(rrset[0].Header().Name, "*") { + rr.Labels-- // wildcard, remove from label count + } + + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + // For signing, lowercase this name + sigwire.SignerName = strings.ToLower(rr.SignerName) + + // Create the desired binary blob + signdata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signdata) + if err != nil { + return err + } + signdata = signdata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + signdata = append(signdata, wire...) 
+ + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + h := hash.New() + h.Write(signdata) + + signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + + return nil +} + +func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { + signature, err := k.Sign(rand.Reader, hashed, hash) + if err != nil { + return nil, err + } + + switch alg { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: + return signature, nil + + case ECDSAP256SHA256, ECDSAP384SHA384: + ecdsaSignature := &struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { + return nil, err + } + + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + + signature := intToBytes(ecdsaSignature.R, intlen) + signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) + return signature, nil + + // There is no defined interface for what a DSA backed crypto.Signer returns + case DSA, DSANSEC3SHA1: + // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) + // signature := []byte{byte(t)} + // signature = append(signature, intToBytes(r1, 20)...) + // signature = append(signature, intToBytes(s1, 20)...) + // rr.Signature = signature + } + + return nil, ErrAlg +} + +// Verify validates an RRSet with the signature and key. This is only the +// cryptographic test, the signature validity period must be checked separately. +// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. +func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { + // First the easy checks + if !IsRRset(rrset) { + return ErrRRset + } + if rr.KeyTag != k.KeyTag() { + return ErrKey + } + if rr.Hdr.Class != k.Hdr.Class { + return ErrKey + } + if rr.Algorithm != k.Algorithm { + return ErrKey + } + if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) { + return ErrKey + } + if k.Protocol != 3 { + return ErrKey + } + + // IsRRset checked that we have at least one RR and that the RRs in + // the set have consistent type, class, and name. Also check that type and + // class matches the RRSIG record. + if rrset[0].Header().Class != rr.Hdr.Class { + return ErrRRset + } + if rrset[0].Header().Rrtype != rr.TypeCovered { + return ErrRRset + } + + // RFC 4035 5.3.2. Reconstructing the Signed Data + // Copy the sig, except the rrsig data + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + sigwire.SignerName = strings.ToLower(rr.SignerName) + // Create the desired binary blob + signeddata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signeddata) + if err != nil { + return err + } + signeddata = signeddata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + signeddata = append(signeddata, wire...) + + sigbuf := rr.sigBuf() // Get the binary signature data + if rr.Algorithm == PRIVATEDNS { // PRIVATEOID + // TODO(miek) + // remove the domain name and assume its ours? + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: + // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? 
+ pubkey := k.publicKeyRSA() // Get the key + if pubkey == nil { + return ErrKey + } + + h := hash.New() + h.Write(signeddata) + return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) + + case ECDSAP256SHA256, ECDSAP384SHA384: + pubkey := k.publicKeyECDSA() + if pubkey == nil { + return ErrKey + } + + // Split sigbuf into the r and s coordinates + r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) + s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) + + h := hash.New() + h.Write(signeddata) + if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { + return nil + } + return ErrSig + + default: + return ErrAlg + } +} + +// ValidityPeriod uses RFC1982 serial arithmetic to calculate +// if a signature period is valid. If t is the zero time, the +// current time is taken other t is. Returns true if the signature +// is valid at the given time, otherwise returns false. +func (rr *RRSIG) ValidityPeriod(t time.Time) bool { + var utc int64 + if t.IsZero() { + utc = time.Now().UTC().Unix() + } else { + utc = t.UTC().Unix() + } + modi := (int64(rr.Inception) - utc) / year68 + mode := (int64(rr.Expiration) - utc) / year68 + ti := int64(rr.Inception) + (modi * year68) + te := int64(rr.Expiration) + (mode * year68) + return ti <= utc && utc <= te +} + +// Return the signatures base64 encodedig sigdata as a byte slice. +func (rr *RRSIG) sigBuf() []byte { + sigbuf, err := fromBase64([]byte(rr.Signature)) + if err != nil { + return nil + } + return sigbuf +} + +// publicKeyRSA returns the RSA public key from a DNSKEY record. +func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + + // RFC 2537/3110, section 2. RSA Public KEY Resource Records + // Length is in the 0th byte, unless its zero, then it + // it in bytes 1 and 2 and its a 16 bit number + explen := uint16(keybuf[0]) + keyoff := 1 + if explen == 0 { + explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) + keyoff = 3 + } + pubkey := new(rsa.PublicKey) + + pubkey.N = big.NewInt(0) + shift := uint64((explen - 1) * 8) + expo := uint64(0) + for i := int(explen - 1); i > 0; i-- { + expo += uint64(keybuf[keyoff+i]) << shift + shift -= 8 + } + // Remainder + expo += uint64(keybuf[keyoff]) + if expo > 2<<31 { + // Larger expo than supported. + // println("dns: F5 primes (or larger) are not supported") + return nil + } + pubkey.E = int(expo) + + pubkey.N.SetBytes(keybuf[keyoff+int(explen):]) + return pubkey +} + +// publicKeyECDSA returns the Curve public key from the DNSKEY record. 
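+// For these algorithms the DNSKEY public key field is simply the
+// uncompressed X|Y coordinate pair (RFC 6605): 2x32 bytes for P-256 and
+// 2x48 bytes for P-384, which is what the length checks below enforce.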
+func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + pubkey := new(ecdsa.PublicKey) + switch k.Algorithm { + case ECDSAP256SHA256: + pubkey.Curve = elliptic.P256() + if len(keybuf) != 64 { + // wrongly encoded key + return nil + } + case ECDSAP384SHA384: + pubkey.Curve = elliptic.P384() + if len(keybuf) != 96 { + // Wrongly encoded key + return nil + } + } + pubkey.X = big.NewInt(0) + pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) + pubkey.Y = big.NewInt(0) + pubkey.Y.SetBytes(keybuf[len(keybuf)/2:]) + return pubkey +} + +func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) < 22 { + return nil + } + t, keybuf := int(keybuf[0]), keybuf[1:] + size := 64 + t*8 + q, keybuf := keybuf[:20], keybuf[20:] + if len(keybuf) != 3*size { + return nil + } + p, keybuf := keybuf[:size], keybuf[size:] + g, y := keybuf[:size], keybuf[size:] + pubkey := new(dsa.PublicKey) + pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) + pubkey.Parameters.P = big.NewInt(0).SetBytes(p) + pubkey.Parameters.G = big.NewInt(0).SetBytes(g) + pubkey.Y = big.NewInt(0).SetBytes(y) + return pubkey +} + +type wireSlice [][]byte + +func (p wireSlice) Len() int { return len(p) } +func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p wireSlice) Less(i, j int) bool { + _, ioff, _ := UnpackDomainName(p[i], 0) + _, joff, _ := UnpackDomainName(p[j], 0) + return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 +} + +// Return the raw signature data. +func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { + wires := make(wireSlice, len(rrset)) + for i, r := range rrset { + r1 := r.copy() + r1.Header().Ttl = s.OrigTtl + labels := SplitDomainName(r1.Header().Name) + // 6.2. Canonical RR Form. (4) - wildcards + if len(labels) > int(s.Labels) { + // Wildcard + r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." + } + // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase + r1.Header().Name = strings.ToLower(r1.Header().Name) + // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. + // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, + // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, + // SRV, DNAME, A6 + // + // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): + // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record + // that needs conversion to lowercase, and twice at that. Since HINFO + // records contain no domain names, they are not subject to case + // conversion. + switch x := r1.(type) { + case *NS: + x.Ns = strings.ToLower(x.Ns) + case *CNAME: + x.Target = strings.ToLower(x.Target) + case *SOA: + x.Ns = strings.ToLower(x.Ns) + x.Mbox = strings.ToLower(x.Mbox) + case *MB: + x.Mb = strings.ToLower(x.Mb) + case *MG: + x.Mg = strings.ToLower(x.Mg) + case *MR: + x.Mr = strings.ToLower(x.Mr) + case *PTR: + x.Ptr = strings.ToLower(x.Ptr) + case *MINFO: + x.Rmail = strings.ToLower(x.Rmail) + x.Email = strings.ToLower(x.Email) + case *MX: + x.Mx = strings.ToLower(x.Mx) + case *NAPTR: + x.Replacement = strings.ToLower(x.Replacement) + case *KX: + x.Exchanger = strings.ToLower(x.Exchanger) + case *SRV: + x.Target = strings.ToLower(x.Target) + case *DNAME: + x.Target = strings.ToLower(x.Target) + } + // 6.2. Canonical RR Form. 
(5) - origTTL + wire := make([]byte, r1.len()+1) // +1 to be safe(r) + off, err1 := PackRR(r1, wire, 0, nil, false) + if err1 != nil { + return nil, err1 + } + wire = wire[:off] + wires[i] = wire + } + sort.Sort(wires) + for i, wire := range wires { + if i > 0 && bytes.Equal(wire, wires[i-1]) { + continue + } + buf = append(buf, wire...) + } + return buf, nil +} + +func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go RRSIG packing + off, err := packUint16(sw.TypeCovered, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(sw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(sw.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(sw.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(sw.SignerName, msg, off, nil, false) + if err != nil { + return off, err + } + return off, nil +} + +func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { + // copied from zmsg.go DNSKEY packing + off, err := packUint16(dw.Flags, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(dw.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(dw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(dw.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go new file mode 100644 index 000000000000..229a079370b7 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -0,0 +1,156 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" +) + +// Generate generates a DNSKEY of the given bit size. +// The public part is put inside the DNSKEY record. +// The Algorithm in the key must be set as this will define +// what kind of DNSKEY will be generated. +// The ECDSA algorithms imply a fixed keysize, in that case +// bits should be set to the size of the algorithm. 
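+// A minimal sketch of generating a zone signing key (the values shown are
+// illustrative):
+//
+//	key := new(DNSKEY)
+//	key.Hdr = RR_Header{Name: "example.org.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600}
+//	key.Flags = 256 // ZSK
+//	key.Protocol = 3
+//	key.Algorithm = ECDSAP256SHA256
+//	priv, err := key.Generate(256)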
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + if bits != 1024 { + return nil, ErrKeySize + } + case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: + if bits < 512 || bits > 4096 { + return nil, ErrKeySize + } + case RSASHA512: + if bits < 1024 || bits > 4096 { + return nil, ErrKeySize + } + case ECDSAP256SHA256: + if bits != 256 { + return nil, ErrKeySize + } + case ECDSAP384SHA384: + if bits != 384 { + return nil, ErrKeySize + } + } + + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + params := new(dsa.Parameters) + if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { + return nil, err + } + priv := new(dsa.PrivateKey) + priv.PublicKey.Parameters = *params + err := dsa.GenerateKey(priv, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) + return priv, nil + case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: + priv, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) + return priv, nil + case ECDSAP256SHA256, ECDSAP384SHA384: + var c elliptic.Curve + switch k.Algorithm { + case ECDSAP256SHA256: + c = elliptic.P256() + case ECDSAP384SHA384: + c = elliptic.P384() + } + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) + return priv, nil + default: + return nil, ErrAlg + } +} + +// Set the public key (the value E and N) +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { + if _E == 0 || _N == nil { + return false + } + buf := exponentToBuf(_E) + buf = append(buf, _N.Bytes()...) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Elliptic Curves +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { + if _X == nil || _Y == nil { + return false + } + var intlen int + switch k.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) + return true +} + +// Set the public key for DSA +func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { + if _Q == nil || _P == nil || _G == nil || _Y == nil { + return false + } + buf := dsaToBuf(_Q, _P, _G, _Y) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key (the values E and N) for RSA +// RFC 3110: Section 2. RSA Public KEY Resource Records +func exponentToBuf(_E int) []byte { + var buf []byte + i := big.NewInt(int64(_E)) + if len(i.Bytes()) < 256 { + buf = make([]byte, 1) + buf[0] = uint8(len(i.Bytes())) + } else { + buf = make([]byte, 3) + buf[0] = 0 + buf[1] = uint8(len(i.Bytes()) >> 8) + buf[2] = uint8(len(i.Bytes())) + } + buf = append(buf, i.Bytes()...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func curveToBuf(_X, _Y *big.Int, intlen int) []byte { + buf := intToBytes(_X, intlen) + buf = append(buf, intToBytes(_Y, intlen)...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { + t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) + buf := []byte{byte(t)} + buf = append(buf, intToBytes(_Q, 20)...) + buf = append(buf, intToBytes(_P, 64+t*8)...) + buf = append(buf, intToBytes(_G, 64+t*8)...) + buf = append(buf, intToBytes(_Y, 64+t*8)...) 
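+	// Wire layout per RFC 2536: a one-octet size parameter T, the 20-octet
+	// subprime Q, then P, G and Y, each 64+T*8 octets long.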
+ return buf +} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go new file mode 100644 index 000000000000..c0b54dc7640a --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -0,0 +1,249 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "io" + "math/big" + "strconv" + "strings" +) + +// NewPrivateKey returns a PrivateKey by parsing the string s. +// s should be in the same form of the BIND private key files. +func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { + if s[len(s)-1] != '\n' { // We need a closing newline + return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") + } + return k.ReadPrivateKey(strings.NewReader(s), "") +} + +// ReadPrivateKey reads a private key from the io.Reader q. The string file is +// only used in error reporting. +// The public key must be known, because some cryptographic algorithms embed +// the public inside the privatekey. +func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { + m, err := parseKey(q, file) + if m == nil { + return nil, err + } + if _, ok := m["private-key-format"]; !ok { + return nil, ErrPrivKey + } + if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { + return nil, ErrPrivKey + } + // TODO(mg): check if the pubkey matches the private key + algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0]) + if err != nil { + return nil, ErrPrivKey + } + switch uint8(algo) { + case DSA: + priv, err := readPrivateKeyDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case RSAMD5: + fallthrough + case RSASHA1: + fallthrough + case RSASHA1NSEC3SHA1: + fallthrough + case RSASHA256: + fallthrough + case RSASHA512: + priv, err := readPrivateKeyRSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyRSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ECCGOST: + return nil, ErrPrivKey + case ECDSAP256SHA256: + fallthrough + case ECDSAP384SHA384: + priv, err := readPrivateKeyECDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyECDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + default: + return nil, ErrPrivKey + } +} + +// Read a private key (file) string and create a public key. Return the private key. 
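+// The map keys are the BIND private-key field names, lowercased by parseKey
+// below (e.g. "modulus", "publicexponent", "privateexponent", "prime1",
+// "prime2"); the values are the base64 blobs from the key file.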
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { + p := new(rsa.PrivateKey) + p.Primes = []*big.Int{nil, nil} + for k, v := range m { + switch k { + case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + switch k { + case "modulus": + p.PublicKey.N = big.NewInt(0) + p.PublicKey.N.SetBytes(v1) + case "publicexponent": + i := big.NewInt(0) + i.SetBytes(v1) + p.PublicKey.E = int(i.Int64()) // int64 should be large enough + case "privateexponent": + p.D = big.NewInt(0) + p.D.SetBytes(v1) + case "prime1": + p.Primes[0] = big.NewInt(0) + p.Primes[0].SetBytes(v1) + case "prime2": + p.Primes[1] = big.NewInt(0) + p.Primes[1].SetBytes(v1) + } + case "exponent1", "exponent2", "coefficient": + // not used in Go (yet) + case "created", "publish", "activate": + // not used in Go (yet) + } + } + return p, nil +} + +func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { + p := new(dsa.PrivateKey) + p.X = big.NewInt(0) + for k, v := range m { + switch k { + case "private_value(x)": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.X.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { + p := new(ecdsa.PrivateKey) + p.D = big.NewInt(0) + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.D.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +// parseKey reads a private key from r. It returns a map[string]string, +// with the key-value pairs, or an error when the file is not correct. +func parseKey(r io.Reader, file string) (map[string]string, error) { + s := scanInit(r) + m := make(map[string]string) + c := make(chan lex) + k := "" + // Start the lexer + go klexer(s, c) + for l := range c { + // It should alternate + switch l.value { + case zKey: + k = l.token + case zValue: + if k == "" { + return nil, &ParseError{file, "no private key seen", l} + } + //println("Setting", strings.ToLower(k), "to", l.token, "b") + m[strings.ToLower(k)] = l.token + k = "" + } + } + return m, nil +} + +// klexer scans the sourcefile and returns tokens on the channel c. 
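+// The input is a sequence of "Key: value" lines, with ';' starting a
+// comment; klexer emits alternating zKey and zValue tokens, which parseKey
+// above folds into the key/value map.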
+func klexer(s *scan, c chan lex) { + var l lex + str := "" // Hold the current read text + commt := false + key := true + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + switch x { + case ':': + if commt { + break + } + l.token = str + if key { + l.value = zKey + c <- l + // Next token is a space, eat it + s.tokenText() + key = false + str = "" + } else { + l.value = zValue + } + case ';': + commt = true + case '\n': + if commt { + // Reset a comment + commt = false + } + l.value = zValue + l.token = str + c <- l + str = "" + commt = false + key = true + default: + if commt { + break + } + str += string(x) + } + x, err = s.tokenText() + } + if len(str) > 0 { + // Send remainder + l.token = str + l.value = zValue + c <- l + } +} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go new file mode 100644 index 000000000000..56f3ea934f63 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -0,0 +1,85 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "math/big" + "strconv" +) + +const format = "Private-key-format: v1.3\n" + +// PrivateKeyString converts a PrivateKey to a string. This string has the same +// format as the private-key-file of BIND9 (Private-key-format: v1.3). +// It needs some info from the key (the algorithm), so its a method of the DNSKEY +// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey +func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { + algorithm := strconv.Itoa(int(r.Algorithm)) + algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" + + switch p := p.(type) { + case *rsa.PrivateKey: + modulus := toBase64(p.PublicKey.N.Bytes()) + e := big.NewInt(int64(p.PublicKey.E)) + publicExponent := toBase64(e.Bytes()) + privateExponent := toBase64(p.D.Bytes()) + prime1 := toBase64(p.Primes[0].Bytes()) + prime2 := toBase64(p.Primes[1].Bytes()) + // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm + // and from: http://code.google.com/p/go/issues/detail?id=987 + one := big.NewInt(1) + p1 := big.NewInt(0).Sub(p.Primes[0], one) + q1 := big.NewInt(0).Sub(p.Primes[1], one) + exp1 := big.NewInt(0).Mod(p.D, p1) + exp2 := big.NewInt(0).Mod(p.D, q1) + coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) + + exponent1 := toBase64(exp1.Bytes()) + exponent2 := toBase64(exp2.Bytes()) + coefficient := toBase64(coeff.Bytes()) + + return format + + "Algorithm: " + algorithm + "\n" + + "Modulus: " + modulus + "\n" + + "PublicExponent: " + publicExponent + "\n" + + "PrivateExponent: " + privateExponent + "\n" + + "Prime1: " + prime1 + "\n" + + "Prime2: " + prime2 + "\n" + + "Exponent1: " + exponent1 + "\n" + + "Exponent2: " + exponent2 + "\n" + + "Coefficient: " + coefficient + "\n" + + case *ecdsa.PrivateKey: + var intlen int + switch r.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + private := toBase64(intToBytes(p.D, intlen)) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" + + case *dsa.PrivateKey: + T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) + prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) + subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) + base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) + priv := toBase64(intToBytes(p.X, 20)) + pub := 
toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) + return format + + "Algorithm: " + algorithm + "\n" + + "Prime(p): " + prime + "\n" + + "Subprime(q): " + subprime + "\n" + + "Base(g): " + base + "\n" + + "Private_value(x): " + priv + "\n" + + "Public_value(y): " + pub + "\n" + + default: + return "" + } +} diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go new file mode 100644 index 000000000000..f3555e433992 --- /dev/null +++ b/vendor/github.com/miekg/dns/doc.go @@ -0,0 +1,251 @@ +/* +Package dns implements a full featured interface to the Domain Name System. +Server- and client-side programming is supported. +The package allows complete control over what is send out to the DNS. The package +API follows the less-is-more principle, by presenting a small, clean interface. + +The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers, +TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. +Note that domain names MUST be fully qualified, before sending them, unqualified +names in a message will result in a packing failure. + +Resource records are native types. They are not stored in wire format. +Basic usage pattern for creating a new resource record: + + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, + Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." + +Or directly from a string: + + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + +Or when the default TTL (3600) and class (IN) suit you: + + mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.") + +Or even: + + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + +In the DNS messages are exchanged, these messages contain resource +records (sets). Use pattern for creating a message: + + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + +Or when not certain if the domain name is fully qualified: + + m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) + +The message m is now a message with the question section set to ask +the MX records for the miek.nl. zone. + +The following is slightly more verbose, but more flexible: + + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + +After creating a message it can be send. +Basic use pattern for synchronous querying the DNS at a +server configured on 127.0.0.1 and port 53: + + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + +Suppressing multiple outstanding queries (with the same question, type and +class) is as easy as setting: + + c.SingleInflight = true + +If these "advanced" features are not needed, a simple UDP query can be send, +with: + + in, err := dns.Exchange(m1, "127.0.0.1:53") + +When this functions returns you will get dns message. A dns message consists +out of four sections. +The question section: in.Question, the answer section: in.Answer, +the authority section: in.Ns and the additional section: in.Extra. + +Each of these sections (except the Question section) contain a []RR. Basic +use pattern for accessing the rdata of a TXT RR as the first RR in +the Answer section: + + if t, ok := in.Answer[0].(*dns.TXT); ok { + // do something with t.Txt + } + +Domain Name and TXT Character String Representations + +Both domain names and TXT character strings are converted to presentation +form both when unpacked and when converted to strings. 
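+
+For instance (an illustrative sketch), a dot that is part of a label stays
+escaped in presentation form, so a name survives a round trip through the
+package unchanged:
+
+	rr, _ := dns.NewRR(`x\.y.example.org. 3600 IN A 127.0.0.1`)
+	// rr.Header().Name is `x\.y.example.org.`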
+ +For TXT character strings, tabs, carriage returns and line feeds will be +converted to \t, \r and \n respectively. Back slashes and quotations marks +will be escaped. Bytes below 32 and above 127 will be converted to \DDD +form. + +For domain names, in addition to the above rules brackets, periods, +spaces, semicolons and the at symbol are escaped. + +DNSSEC + +DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It +uses public key cryptography to sign resource records. The +public keys are stored in DNSKEY records and the signatures in RRSIG records. + +Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit +to a request. + + m := new(dns.Msg) + m.SetEdns0(4096, true) + +Signature generation, signature verification and key generation are all supported. + +DYNAMIC UPDATES + +Dynamic updates reuses the DNS message format, but renames three of +the sections. Question is Zone, Answer is Prerequisite, Authority is +Update, only the Additional is not renamed. See RFC 2136 for the gory details. + +You can set a rather complex set of rules for the existence of absence of +certain resource records or names in a zone to specify if resource records +should be added or removed. The table from RFC 2136 supplemented with the Go +DNS function shows which functions exist to specify the prerequisites. + + 3.2.4 - Table Of Metavalues Used In Prerequisite Section + + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used + +The prerequisite section can also be left empty. +If you have decided on the prerequisites you can tell what RRs should +be added or deleted. The next table shows the options you have and +what functions to call. + + 3.4.2.6 - Table Of Metavalues Used In Update Section + + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert + +TRANSACTION SIGNATURE + +An TSIG or transaction signature adds a HMAC TSIG record to each message sent. +The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. + +Basic use pattern when querying with a TSIG name "axfr." (note that these key names +must be fully qualified - as they are domain names) and the base64 secret +"so6ZGir4GPAqINNh9U5c3A==": + + c := new(dns.Client) + c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + ... + // When sending the TSIG RR is calculated and filled in before sending + +When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with +TSIG, this is the basic use pattern. In this example we request an AXFR for +miek.nl. with TSIG key named "axfr." 
and secret "so6ZGir4GPAqINNh9U5c3A==" +and using the server 176.58.119.54: + + t := new(dns.Transfer) + m := new(dns.Msg) + t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m.SetAxfr("miek.nl.") + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + c, err := t.In(m, "176.58.119.54:53") + for r := range c { ... } + +You can now read the records from the transfer as they come in. Each envelope is checked with TSIG. +If something is not correct an error is returned. + +Basic use pattern validating and replying to a message that has TSIG set. + + server := &dns.Server{Addr: ":53", Net: "udp"} + server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + go server.ListenAndServe() + dns.HandleFunc(".", handleRequest) + + func handleRequest(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + if r.IsTsig() != nil { + if w.TsigStatus() == nil { + // *Msg r has an TSIG record and it was validated + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + } else { + // *Msg r has an TSIG records and it was not valided + } + } + w.WriteMsg(m) + } + +PRIVATE RRS + +RFC 6895 sets aside a range of type codes for private use. This range +is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these +can be used, before requesting an official type code from IANA. + +see http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more +information. + +EDNS0 + +EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated +by RFC 6891. It defines an new RR type, the OPT RR, which is then completely +abused. +Basic use pattern for creating an (empty) OPT RR: + + o := new(dns.OPT) + o.Hdr.Name = "." // MUST be the root zone, per definition. + o.Hdr.Rrtype = dns.TypeOPT + +The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) +interfaces. Currently only a few have been standardized: EDNS0_NSID +(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note +that these options may be combined in an OPT RR. +Basic use pattern for a server to check if (and which) options are set: + + // o is a dns.OPT + for _, s := range o.Option { + switch e := s.(type) { + case *dns.EDNS0_NSID: + // do stuff with e.Nsid + case *dns.EDNS0_SUBNET: + // access e.Family, e.Address, etc. + } + } + +SIG(0) + +From RFC 2931: + + SIG(0) provides protection for DNS transactions and requests .... + ... protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. + +It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared +secret approach in TSIG. +Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and +RSASHA512. + +Signing subsequent messages in multi-message sessions is not implemented. +*/ +package dns diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go new file mode 100644 index 000000000000..7a58aa9b17b6 --- /dev/null +++ b/vendor/github.com/miekg/dns/edns.go @@ -0,0 +1,532 @@ +package dns + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "net" + "strconv" +) + +// EDNS0 Option codes. 
+const ( + EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 + EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt + EDNS0NSID = 0x3 // nsid (RFC5001) + EDNS0DAU = 0x5 // DNSSEC Algorithm Understood + EDNS0DHU = 0x6 // DS Hash Understood + EDNS0N3U = 0x7 // NSEC3 Hash Understood + EDNS0SUBNET = 0x8 // client-subnet (RFC6891) + EDNS0EXPIRE = 0x9 // EDNS0 expire + EDNS0COOKIE = 0xa // EDNS0 Cookie + EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET + EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891) + EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891) + _DO = 1 << 15 // dnssec ok +) + +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. +// See RFC 6891. +type OPT struct { + Hdr RR_Header + Option []EDNS0 `dns:"opt"` +} + +func (rr *OPT) String() string { + s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " + if rr.Do() { + s += "flags: do; " + } else { + s += "flags: ; " + } + s += "udp: " + strconv.Itoa(int(rr.UDPSize())) + + for _, o := range rr.Option { + switch o.(type) { + case *EDNS0_NSID: + s += "\n; NSID: " + o.String() + h, e := o.pack() + var r string + if e == nil { + for _, c := range h { + r += "(" + string(c) + ")" + } + s += " " + r + } + case *EDNS0_SUBNET: + s += "\n; SUBNET: " + o.String() + if o.(*EDNS0_SUBNET).DraftOption { + s += " (draft)" + } + case *EDNS0_COOKIE: + s += "\n; COOKIE: " + o.String() + case *EDNS0_UL: + s += "\n; UPDATE LEASE: " + o.String() + case *EDNS0_LLQ: + s += "\n; LONG LIVED QUERIES: " + o.String() + case *EDNS0_DAU: + s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() + case *EDNS0_DHU: + s += "\n; DS HASH UNDERSTOOD: " + o.String() + case *EDNS0_N3U: + s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() + case *EDNS0_LOCAL: + s += "\n; LOCAL OPT: " + o.String() + } + } + return s +} + +func (rr *OPT) len() int { + l := rr.Hdr.len() + for i := 0; i < len(rr.Option); i++ { + l += 4 // Account for 2-byte option code and 2-byte option length. + lo, _ := rr.Option[i].pack() + l += len(lo) + } + return l +} + +// return the old value -> delete SetVersion? + +// Version returns the EDNS version used. Only zero is defined. +func (rr *OPT) Version() uint8 { + return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16) +} + +// SetVersion sets the version of EDNS. This is usually zero. +func (rr *OPT) SetVersion(v uint8) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16) +} + +// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). +func (rr *OPT) ExtendedRcode() int { + return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15 +} + +// SetExtendedRcode sets the EDNS extended RCODE field. +func (rr *OPT) SetExtendedRcode(v uint8) { + if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have! + return + } + rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24) +} + +// UDPSize returns the UDP buffer size. +func (rr *OPT) UDPSize() uint16 { + return rr.Hdr.Class +} + +// SetUDPSize sets the UDP buffer size. +func (rr *OPT) SetUDPSize(size uint16) { + rr.Hdr.Class = size +} + +// Do returns the value of the DO (DNSSEC OK) bit. +func (rr *OPT) Do() bool { + return rr.Hdr.Ttl&_DO == _DO +} + +// SetDo sets the DO (DNSSEC OK) bit. +func (rr *OPT) SetDo() { + rr.Hdr.Ttl |= _DO +} + +// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. 
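+// Options are carried in OPT.Option; the usual way to get a well-formed OPT
+// RR onto an outgoing query is Msg.SetEdns0, which is roughly equivalent to
+// this sketch:
+//
+//	o := new(OPT)
+//	o.Hdr.Name = "."
+//	o.Hdr.Rrtype = TypeOPT
+//	o.SetUDPSize(4096)
+//	o.SetDo()
+//	m.Extra = append(m.Extra, o)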
+type EDNS0 interface { + // Option returns the option code for the option. + Option() uint16 + // pack returns the bytes of the option data. + pack() ([]byte, error) + // unpack sets the data as found in the buffer. Is also sets + // the length of the slice as the length of the option data. + unpack([]byte) error + // String returns the string representation of the option. + String() string +} + +// The nsid EDNS0 option is used to retrieve a nameserver +// identifier. When sending a request Nsid must be set to the empty string +// The identifier is an opaque string encoded as hex. +// Basic use pattern for creating an nsid option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_NSID) +// e.Code = dns.EDNS0NSID +// e.Nsid = "AA" +// o.Option = append(o.Option, e) +type EDNS0_NSID struct { + Code uint16 // Always EDNS0NSID + Nsid string // This string needs to be hex encoded +} + +func (e *EDNS0_NSID) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Nsid) + if err != nil { + return nil, err + } + return h, nil +} + +func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } +func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } +func (e *EDNS0_NSID) String() string { return string(e.Nsid) } + +// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver +// an idea of where the client lives. It can then give back a different +// answer depending on the location or network topology. +// Basic use pattern for creating an subnet option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_SUBNET) +// e.Code = dns.EDNS0SUBNET +// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 +// e.NetMask = 32 // 32 for IPV4, 128 for IPv6 +// e.SourceScope = 0 +// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 +// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 +// o.Option = append(o.Option, e) +// +// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic +// for which netmask applies to the address. This code will parse all the +// available bits when unpacking (up to optlen). When packing it will apply +// SourceNetmask. If you need more advanced logic, patches welcome and good luck. +type EDNS0_SUBNET struct { + Code uint16 // Always EDNS0SUBNET + Family uint16 // 1 for IP, 2 for IP6 + SourceNetmask uint8 + SourceScope uint8 + Address net.IP + DraftOption bool // Set to true if using the old (0x50fa) option code +} + +func (e *EDNS0_SUBNET) Option() uint16 { + if e.DraftOption { + return EDNS0SUBNETDRAFT + } + return EDNS0SUBNET +} + +func (e *EDNS0_SUBNET) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[0:], e.Family) + b[2] = e.SourceNetmask + b[3] = e.SourceScope + switch e.Family { + case 1: + if e.SourceNetmask > net.IPv4len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address.To4()) != net.IPv4len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) 
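+		// Only the left-most SourceNetmask bits of the masked address are
+		// sent, rounded up to whole octets, as the client-subnet draft
+		// requires; the IPv6 branch below does the same.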
+ case 2: + if e.SourceNetmask > net.IPv6len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address) != net.IPv6len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + default: + return nil, errors.New("dns: bad address family") + } + return b, nil +} + +func (e *EDNS0_SUBNET) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Family = binary.BigEndian.Uint16(b) + e.SourceNetmask = b[2] + e.SourceScope = b[3] + switch e.Family { + case 1: + if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv4len) + for i := 0; i < net.IPv4len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) + case 2: + if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv6len) + for i := 0; i < net.IPv6len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], + addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], + addr[11], addr[12], addr[13], addr[14], addr[15]} + default: + return errors.New("dns: bad address family") + } + return nil +} + +func (e *EDNS0_SUBNET) String() (s string) { + if e.Address == nil { + s = "" + } else if e.Address.To4() != nil { + s = e.Address.String() + } else { + s = "[" + e.Address.String() + "]" + } + s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) + return +} + +// The Cookie EDNS0 option +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_COOKIE) +// e.Code = dns.EDNS0COOKIE +// e.Cookie = "24a5ac.." +// o.Option = append(o.Option, e) +// +// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is +// always 8 bytes. It may then optionally be followed by the server cookie. The server +// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: +// +// cCookie := o.Cookie[:16] +// sCookie := o.Cookie[16:] +// +// There is no guarantee that the Cookie string has a specific length. +type EDNS0_COOKIE struct { + Code uint16 // Always EDNS0COOKIE + Cookie string // Hex-encoded cookie data +} + +func (e *EDNS0_COOKIE) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Cookie) + if err != nil { + return nil, err + } + return h, nil +} + +func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } +func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } +func (e *EDNS0_COOKIE) String() string { return e.Cookie } + +// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set +// an expiration on an update RR. This is helpful for clients that cannot clean +// up after themselves. This is a draft RFC and more information can be found at +// http://files.dns-sd.org/draft-sekar-dns-ul.txt +// +// o := new(dns.OPT) +// o.Hdr.Name = "." 
+// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_UL) +// e.Code = dns.EDNS0UL +// e.Lease = 120 // in seconds +// o.Option = append(o.Option, e) +type EDNS0_UL struct { + Code uint16 // Always EDNS0UL + Lease uint32 +} + +func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } +func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } + +// Copied: http://golang.org/src/pkg/net/dnsmsg.go +func (e *EDNS0_UL) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, e.Lease) + return b, nil +} + +func (e *EDNS0_UL) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Lease = binary.BigEndian.Uint32(b) + return nil +} + +// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 +// Implemented for completeness, as the EDNS0 type code is assigned. +type EDNS0_LLQ struct { + Code uint16 // Always EDNS0LLQ + Version uint16 + Opcode uint16 + Error uint16 + Id uint64 + LeaseLife uint32 +} + +func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } + +func (e *EDNS0_LLQ) pack() ([]byte, error) { + b := make([]byte, 18) + binary.BigEndian.PutUint16(b[0:], e.Version) + binary.BigEndian.PutUint16(b[2:], e.Opcode) + binary.BigEndian.PutUint16(b[4:], e.Error) + binary.BigEndian.PutUint64(b[6:], e.Id) + binary.BigEndian.PutUint32(b[14:], e.LeaseLife) + return b, nil +} + +func (e *EDNS0_LLQ) unpack(b []byte) error { + if len(b) < 18 { + return ErrBuf + } + e.Version = binary.BigEndian.Uint16(b[0:]) + e.Opcode = binary.BigEndian.Uint16(b[2:]) + e.Error = binary.BigEndian.Uint16(b[4:]) + e.Id = binary.BigEndian.Uint64(b[6:]) + e.LeaseLife = binary.BigEndian.Uint32(b[14:]) + return nil +} + +func (e *EDNS0_LLQ) String() string { + s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + + " " + strconv.FormatUint(uint64(e.LeaseLife), 10) + return s +} + +type EDNS0_DAU struct { + Code uint16 // Always EDNS0DAU + AlgCode []uint8 +} + +func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } +func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DAU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +type EDNS0_DHU struct { + Code uint16 // Always EDNS0DHU + AlgCode []uint8 +} + +func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } +func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DHU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +type EDNS0_N3U struct { + Code uint16 // Always EDNS0N3U + AlgCode []uint8 +} + +func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } +func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_N3U) String() string { + // Re-use the hash map + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) 
+ } + } + return s +} + +type EDNS0_EXPIRE struct { + Code uint16 // Always EDNS0EXPIRE + Expire uint32 +} + +func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } +func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } + +func (e *EDNS0_EXPIRE) pack() ([]byte, error) { + b := make([]byte, 4) + b[0] = byte(e.Expire >> 24) + b[1] = byte(e.Expire >> 16) + b[2] = byte(e.Expire >> 8) + b[3] = byte(e.Expire) + return b, nil +} + +func (e *EDNS0_EXPIRE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Expire = binary.BigEndian.Uint32(b) + return nil +} + +// The EDNS0_LOCAL option is used for local/experimental purposes. The option +// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] +// (RFC6891), although any unassigned code can actually be used. The content of +// the option is made available in Data, unaltered. +// Basic use pattern for creating a local option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_LOCAL) +// e.Code = dns.EDNS0LOCALSTART +// e.Data = []byte{72, 82, 74} +// o.Option = append(o.Option, e) +type EDNS0_LOCAL struct { + Code uint16 + Data []byte +} + +func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } +func (e *EDNS0_LOCAL) String() string { + return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) +} + +func (e *EDNS0_LOCAL) pack() ([]byte, error) { + b := make([]byte, len(e.Data)) + copied := copy(b, e.Data) + if copied != len(e.Data) { + return nil, ErrBuf + } + return b, nil +} + +func (e *EDNS0_LOCAL) unpack(b []byte) error { + e.Data = make([]byte, len(b)) + copied := copy(e.Data, b) + if copied != len(b) { + return ErrBuf + } + return nil +} diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go new file mode 100644 index 000000000000..3f5303c20138 --- /dev/null +++ b/vendor/github.com/miekg/dns/format.go @@ -0,0 +1,87 @@ +package dns + +import ( + "net" + "reflect" + "strconv" +) + +// NumField returns the number of rdata fields r has. +func NumField(r RR) int { + return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header +} + +// Field returns the rdata field i as a string. Fields are indexed starting from 1. +// RR types that holds slice data, for instance the NSEC type bitmap will return a single +// string where the types are concatenated using a space. +// Accessing non existing fields will cause a panic. 
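+// For example (illustrative):
+//
+//	mx, _ := NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
+//	Field(mx, 1) // "10" (Preference)
+//	Field(mx, 2) // "mx.miek.nl." (Mx)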
+func Field(r RR, i int) string { + if i == 0 { + return "" + } + d := reflect.ValueOf(r).Elem().Field(i) + switch k := d.Kind(); k { + case reflect.String: + return d.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(d.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(d.Uint(), 10) + case reflect.Slice: + switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { + case `dns:"a"`: + // TODO(miek): Hmm store this as 16 bytes + if d.Len() < net.IPv6len { + return net.IPv4(byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint())).String() + } + return net.IPv4(byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint())).String() + case `dns:"aaaa"`: + return net.IP{ + byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint()), + byte(d.Index(4).Uint()), + byte(d.Index(5).Uint()), + byte(d.Index(6).Uint()), + byte(d.Index(7).Uint()), + byte(d.Index(8).Uint()), + byte(d.Index(9).Uint()), + byte(d.Index(10).Uint()), + byte(d.Index(11).Uint()), + byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint()), + }.String() + case `dns:"nsec"`: + if d.Len() == 0 { + return "" + } + s := Type(d.Index(0).Uint()).String() + for i := 1; i < d.Len(); i++ { + s += " " + Type(d.Index(i).Uint()).String() + } + return s + default: + // if it does not have a tag its a string slice + fallthrough + case `dns:"txt"`: + if d.Len() == 0 { + return "" + } + s := d.Index(0).String() + for i := 1; i < d.Len(); i++ { + s += " " + d.Index(i).String() + } + return s + } + } + return "" +} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go new file mode 100644 index 000000000000..e4481a4b0d8d --- /dev/null +++ b/vendor/github.com/miekg/dns/generate.go @@ -0,0 +1,159 @@ +package dns + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +// Parse the $GENERATE statement as used in BIND9 zones. +// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. +// We are called after '$GENERATE '. After which we expect: +// * the range (12-24/2) +// * lhs (ownername) +// * [[ttl][class]] +// * type +// * rhs (rdata) +// But we are lazy here, only the range is parsed *all* occurrences +// of $ after that are interpreted. +// Any error are returned as a string value, the empty string signals +// "no error". +func generate(l lex, c chan lex, t chan *Token, o string) string { + step := 1 + if i := strings.IndexAny(l.token, "/"); i != -1 { + if i+1 == len(l.token) { + return "bad step in $GENERATE range" + } + if s, err := strconv.Atoi(l.token[i+1:]); err == nil { + if s < 0 { + return "bad step in $GENERATE range" + } + step = s + } else { + return "bad step in $GENERATE range" + } + l.token = l.token[:i] + } + sx := strings.SplitN(l.token, "-", 2) + if len(sx) != 2 { + return "bad start-stop in $GENERATE range" + } + start, err := strconv.Atoi(sx[0]) + if err != nil { + return "bad start in $GENERATE range" + } + end, err := strconv.Atoi(sx[1]) + if err != nil { + return "bad stop in $GENERATE range" + } + if end < 0 || start < 0 || end < start { + return "bad range in $GENERATE range" + } + + <-c // _BLANK + // Create a complete new string, which we then parse again. 
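+	// Everything up to the newline is buffered into s; the loop below then
+	// substitutes every $ (or ${offset,width,base} modifier) with the
+	// current counter value and feeds the resulting line through NewRR.
+	// Illustrative input: "$GENERATE 1-3 host-$ A 10.0.0.$" yields host-1
+	// through host-3 with addresses 10.0.0.1 through 10.0.0.3.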
+ s := "" +BuildRR: + l = <-c + if l.value != zNewline && l.value != zEOF { + s += l.token + goto BuildRR + } + for i := start; i <= end; i += step { + var ( + escape bool + dom bytes.Buffer + mod string + err error + offset int + ) + + for j := 0; j < len(s); j++ { // No 'range' because we need to jump around + switch s[j] { + case '\\': + if escape { + dom.WriteByte('\\') + escape = false + continue + } + escape = true + case '$': + mod = "%d" + offset = 0 + if escape { + dom.WriteByte('$') + escape = false + continue + } + escape = false + if j+1 >= len(s) { // End of the string + dom.WriteString(fmt.Sprintf(mod, i+offset)) + continue + } else { + if s[j+1] == '$' { + dom.WriteByte('$') + j++ + continue + } + } + // Search for { and } + if s[j+1] == '{' { // Modifier block + sep := strings.Index(s[j+2:], "}") + if sep == -1 { + return "bad modifier in $GENERATE" + } + mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) + if err != nil { + return err.Error() + } + j += 2 + sep // Jump to it + } + dom.WriteString(fmt.Sprintf(mod, i+offset)) + default: + if escape { // Pretty useless here + escape = false + continue + } + dom.WriteByte(s[j]) + } + } + // Re-parse the RR and send it on the current channel t + rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String()) + if err != nil { + return err.Error() + } + t <- &Token{RR: rx} + // Its more efficient to first built the rrlist and then parse it in + // one go! But is this a problem? + } + return "" +} + +// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. +func modToPrintf(s string) (string, int, error) { + xs := strings.SplitN(s, ",", 3) + if len(xs) != 3 { + return "", 0, errors.New("bad modifier in $GENERATE") + } + // xs[0] is offset, xs[1] is width, xs[2] is base + if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" { + return "", 0, errors.New("bad base in $GENERATE") + } + offset, err := strconv.Atoi(xs[0]) + if err != nil || offset > 255 { + return "", 0, errors.New("bad offset in $GENERATE") + } + width, err := strconv.Atoi(xs[1]) + if err != nil || width > 255 { + return "", offset, errors.New("bad width in $GENERATE") + } + switch { + case width < 0: + return "", offset, errors.New("bad width in $GENERATE") + case width == 0: + return "%" + xs[1] + xs[2], offset, nil + } + return "%0" + xs[1] + xs[2], offset, nil +} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go new file mode 100644 index 000000000000..fca5c7dd2d15 --- /dev/null +++ b/vendor/github.com/miekg/dns/labels.go @@ -0,0 +1,168 @@ +package dns + +// Holds a bunch of helper functions for dealing with labels. + +// SplitDomainName splits a name string into it's labels. +// www.miek.nl. returns []string{"www", "miek", "nl"} +// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, +// The root label (.) returns nil. Note that using +// strings.Split(s) will work in most cases, but does not handle +// escaped dots (\.) for instance. +// s must be a syntactically valid domain name, see IsDomainName. +func SplitDomainName(s string) (labels []string) { + if len(s) == 0 { + return nil + } + fqdnEnd := 0 // offset of the final '.' or the length of the name + idx := Split(s) + begin := 0 + if s[len(s)-1] == '.' 
{ + fqdnEnd = len(s) - 1 + } else { + fqdnEnd = len(s) + } + + switch len(idx) { + case 0: + return nil + case 1: + // no-op + default: + end := 0 + for i := 1; i < len(idx); i++ { + end = idx[i] + labels = append(labels, s[begin:end-1]) + begin = end + } + } + + labels = append(labels, s[begin:fqdnEnd]) + return labels +} + +// CompareDomainName compares the names s1 and s2 and +// returns how many labels they have in common starting from the *right*. +// The comparison stops at the first inequality. The names are not downcased +// before the comparison. +// +// www.miek.nl. and miek.nl. have two labels in common: miek and nl +// www.miek.nl. and www.bla.nl. have one label in common: nl +// +// s1 and s2 must be syntactically valid domain names. +func CompareDomainName(s1, s2 string) (n int) { + s1 = Fqdn(s1) + s2 = Fqdn(s2) + l1 := Split(s1) + l2 := Split(s2) + + // the first check: root label + if l1 == nil || l2 == nil { + return + } + + j1 := len(l1) - 1 // end + i1 := len(l1) - 2 // start + j2 := len(l2) - 1 + i2 := len(l2) - 2 + // the second check can be done here: last/only label + // before we fall through into the for-loop below + if s1[l1[j1]:] == s2[l2[j2]:] { + n++ + } else { + return + } + for { + if i1 < 0 || i2 < 0 { + break + } + if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] { + n++ + } else { + break + } + j1-- + i1-- + j2-- + i2-- + } + return +} + +// CountLabel counts the the number of labels in the string s. +// s must be a syntactically valid domain name. +func CountLabel(s string) (labels int) { + if s == "." { + return + } + off := 0 + end := false + for { + off, end = NextLabel(s, off) + labels++ + if end { + return + } + } +} + +// Split splits a name s into its label indexes. +// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. +// The root name (.) returns nil. Also see SplitDomainName. +// s must be a syntactically valid domain name. +func Split(s string) []int { + if s == "." { + return nil + } + idx := make([]int, 1, 3) + off := 0 + end := false + + for { + off, end = NextLabel(s, off) + if end { + return idx + } + idx = append(idx, off) + } +} + +// NextLabel returns the index of the start of the next label in the +// string s starting at offset. +// The bool end is true when the end of the string has been reached. +// Also see PrevLabel. +func NextLabel(s string, offset int) (i int, end bool) { + quote := false + for i = offset; i < len(s)-1; i++ { + switch s[i] { + case '\\': + quote = !quote + default: + quote = false + case '.': + if quote { + quote = !quote + continue + } + return i + 1, false + } + } + return i + 1, true +} + +// PrevLabel returns the index of the label when starting from the right and +// jumping n labels to the left. +// The bool start is true when the start of the string has been overshot. +// Also see NextLabel. +func PrevLabel(s string, n int) (i int, start bool) { + if n == 0 { + return len(s), false + } + lab := Split(s) + if lab == nil { + return 0, true + } + if n > len(lab) { + return 0, true + } + return lab[len(lab)-n], false +} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go new file mode 100644 index 000000000000..ec2f7ab7bb7e --- /dev/null +++ b/vendor/github.com/miekg/dns/msg.go @@ -0,0 +1,1231 @@ +// DNS packet assembly, see RFC 1035. Converting from - Unpack() - +// and to - Pack() - wire format. +// All the packers and unpackers take a (msg []byte, off int) +// and return (off1 int, ok bool). 
If they return ok==false, they +// also return off1==len(msg), so that the next unpacker will +// also fail. This lets us avoid checks of ok until the end of a +// packing sequence. + +package dns + +//go:generate go run msg_generate.go + +import ( + crand "crypto/rand" + "encoding/binary" + "math/big" + "math/rand" + "strconv" +) + +func init() { + // Initialize default math/rand source using crypto/rand to provide better + // security without the performance trade-off. + buf := make([]byte, 8) + _, err := crand.Read(buf) + if err != nil { + // Failed to read from cryptographic source, fallback to default initial + // seed (1) by returning early + return + } + seed := binary.BigEndian.Uint64(buf) + rand.Seed(int64(seed)) +} + +const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer + +var ( + ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. + ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. + ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used it too small for the message. + ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being uses before it is initialized. + ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... + ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. + ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. + ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. + ErrKey error = &Error{err: "bad key"} + ErrKeySize error = &Error{err: "bad key size"} + ErrNoSig error = &Error{err: "no signature found"} + ErrPrivKey error = &Error{err: "bad private key"} + ErrRcode error = &Error{err: "bad rcode"} + ErrRdata error = &Error{err: "bad rdata"} + ErrRRset error = &Error{err: "bad rrset"} + ErrSecret error = &Error{err: "no secrets defined"} + ErrShortRead error = &Error{err: "short read"} + ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. + ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. + ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. + ErrTruncated error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired. +) + +// Id, by default, returns a 16 bits random number to be used as a +// message id. The random provided should be good enough. This being a +// variable the function can be reassigned to a custom function. +// For instance, to make it return a static value: +// +// dns.Id = func() uint16 { return 3 } +var Id func() uint16 = id + +// id returns a 16 bits random number to be used as a +// message id. The random provided should be good enough. +func id() uint16 { + id32 := rand.Uint32() + return uint16(id32) +} + +// MsgHdr is a a manually-unpacked version of (id, bits). 
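+// The boolean fields correspond to the header bits of RFC 1035 (AD and CD
+// come from the DNSSEC RFCs): Response=QR, Authoritative=AA, Truncated=TC,
+// RecursionDesired=RD, RecursionAvailable=RA, Zero=Z, AuthenticatedData=AD,
+// CheckingDisabled=CD.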
+type MsgHdr struct { + Id uint16 + Response bool + Opcode int + Authoritative bool + Truncated bool + RecursionDesired bool + RecursionAvailable bool + Zero bool + AuthenticatedData bool + CheckingDisabled bool + Rcode int +} + +// Msg contains the layout of a DNS message. +type Msg struct { + MsgHdr + Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. + Question []Question // Holds the RR(s) of the question section. + Answer []RR // Holds the RR(s) of the answer section. + Ns []RR // Holds the RR(s) of the authority section. + Extra []RR // Holds the RR(s) of the additional section. +} + +// ClassToString is a maps Classes to strings for each CLASS wire type. +var ClassToString = map[uint16]string{ + ClassINET: "IN", + ClassCSNET: "CS", + ClassCHAOS: "CH", + ClassHESIOD: "HS", + ClassNONE: "NONE", + ClassANY: "ANY", +} + +// OpcodeToString maps Opcodes to strings. +var OpcodeToString = map[int]string{ + OpcodeQuery: "QUERY", + OpcodeIQuery: "IQUERY", + OpcodeStatus: "STATUS", + OpcodeNotify: "NOTIFY", + OpcodeUpdate: "UPDATE", +} + +// RcodeToString maps Rcodes to strings. +var RcodeToString = map[int]string{ + RcodeSuccess: "NOERROR", + RcodeFormatError: "FORMERR", + RcodeServerFailure: "SERVFAIL", + RcodeNameError: "NXDOMAIN", + RcodeNotImplemented: "NOTIMPL", + RcodeRefused: "REFUSED", + RcodeYXDomain: "YXDOMAIN", // See RFC 2136 + RcodeYXRrset: "YXRRSET", + RcodeNXRrset: "NXRRSET", + RcodeNotAuth: "NOTAUTH", + RcodeNotZone: "NOTZONE", + RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 + // RcodeBadVers: "BADVERS", + RcodeBadKey: "BADKEY", + RcodeBadTime: "BADTIME", + RcodeBadMode: "BADMODE", + RcodeBadName: "BADNAME", + RcodeBadAlg: "BADALG", + RcodeBadTrunc: "BADTRUNC", + RcodeBadCookie: "BADCOOKIE", +} + +// Domain names are a sequence of counted strings +// split at the dots. They end with a zero-length string. + +// PackDomainName packs a domain name s into msg[off:]. +// If compression is wanted compress must be true and the compression +// map needs to hold a mapping between domain names and offsets +// pointing into msg. +func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + off1, _, err = packDomainName(s, msg, off, compression, compress) + return +} + +func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { + // special case if msg == nil + lenmsg := 256 + if msg != nil { + lenmsg = len(msg) + } + ls := len(s) + if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. + return off, 0, nil + } + // If not fully qualified, error out, but only if msg == nil #ugly + switch { + case msg == nil: + if s[ls-1] != '.' { + s += "." + ls++ + } + case msg != nil: + if s[ls-1] != '.' { + return lenmsg, 0, ErrFqdn + } + } + // Each dot ends a segment of the name. + // We trade each dot byte for a length byte. + // Except for escaped dots (\.), which are normal dots. + // There is also a trailing zero. + + // Compression + nameoffset := -1 + pointer := -1 + // Emit sequence of counted strings, chopping at dots. 
+ begin := 0 + bs := []byte(s) + roBs, bsFresh, escapedDot := s, true, false + for i := 0; i < ls; i++ { + if bs[i] == '\\' { + for j := i; j < ls-1; j++ { + bs[j] = bs[j+1] + } + ls-- + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + // check for \DDD + if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + bs[i] = dddToByte(bs[i:]) + for j := i + 1; j < ls-2; j++ { + bs[j] = bs[j+2] + } + ls -= 2 + } else if bs[i] == 't' { + bs[i] = '\t' + } else if bs[i] == 'r' { + bs[i] = '\r' + } else if bs[i] == 'n' { + bs[i] = '\n' + } + escapedDot = bs[i] == '.' + bsFresh = false + continue + } + + if bs[i] == '.' { + if i > 0 && bs[i-1] == '.' && !escapedDot { + // two dots back to back is not legal + return lenmsg, labels, ErrRdata + } + if i-begin >= 1<<6 { // top two bits of length must be clear + return lenmsg, labels, ErrRdata + } + // off can already (we're in a loop) be bigger than len(msg) + // this happens when a name isn't fully qualified + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = byte(i - begin) + } + offset := off + off++ + for j := begin; j < i; j++ { + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = bs[j] + } + off++ + } + if compress && !bsFresh { + roBs = string(bs) + bsFresh = true + } + // Don't try to compress '.' + if compress && roBs[begin:] != "." { + if p, ok := compression[roBs[begin:]]; !ok { + // Only offsets smaller than this can be used. + if offset < maxCompressionOffset { + compression[roBs[begin:]] = offset + } + } else { + // The first hit is the longest matching dname + // keep the pointer offset we get back and store + // the offset of the current name, because that's + // where we need to insert the pointer later + + // If compress is true, we're allowed to compress this dname + if pointer == -1 && compress { + pointer = p // Where to point to + nameoffset = offset // Where to point from + break + } + } + } + labels++ + begin = i + 1 + } + escapedDot = false + } + // Root label is special + if len(bs) == 1 && bs[0] == '.' { + return off, labels, nil + } + // If we did compression and we find something add the pointer here + if pointer != -1 { + // We have two bytes (14 bits) to put the pointer in + // if msg == nil, we will never do compression + binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) + off = nameoffset + 1 + goto End + } + if msg != nil && off < len(msg) { + msg[off] = 0 + } +End: + off++ + return off, labels, nil +} + +// Unpack a domain name. +// In addition to the simple sequences of counted strings above, +// domain names are allowed to refer to strings elsewhere in the +// packet, to avoid repeating common suffixes when returning +// many entries in a single domain. The pointers are marked +// by a length byte with the top two bits set. Ignoring those +// two bits, that byte and the next give a 14 bit offset from msg[0] +// where we should pick up the trail. +// Note that if we jump elsewhere in the packet, +// we return off1 == the offset after the first pointer we found, +// which is where the next record will start. +// In theory, the pointers are only allowed to jump backward. +// We let them jump anywhere and stop jumping after a while. + +// UnpackDomainName unpacks a domain name into a string. 
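+// For example, a pointer back to offset 12 (the usual start of the
+// question name in a response) is encoded on the wire as the two
+// bytes 0xC0 0x0C: the top two bits mark the pointer, the remaining
+// 14 bits hold the offset from the start of the message.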
+func UnpackDomainName(msg []byte, off int) (string, int, error) { + s := make([]byte, 0, 64) + off1 := 0 + lenmsg := len(msg) + ptr := 0 // number of pointers followed +Loop: + for { + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // end of name + break Loop + } + // literal string + if off+c > lenmsg { + return "", lenmsg, ErrBuf + } + for j := off; j < off+c; j++ { + switch b := msg[j]; b { + case '.', '(', ')', ';', ' ', '@': + fallthrough + case '"', '\\': + s = append(s, '\\', b) + case '\t': + s = append(s, '\\', 't') + case '\r': + s = append(s, '\\', 'r') + default: + if b < 32 || b >= 127 { // unprintable use \DDD + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + s = append(s, '.') + off += c + case 0xC0: + // pointer to somewhere else in msg. + // remember location after first ptr, + // since that's how many bytes we consumed. + // also, don't follow too many pointers -- + // maybe there's a loop. + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c1 := msg[off] + off++ + if ptr == 0 { + off1 = off + } + if ptr++; ptr > 10 { + return "", lenmsg, &Error{err: "too many compression pointers"} + } + off = (c^0xC0)<<8 | int(c1) + default: + // 0x80 and 0x40 are reserved + return "", lenmsg, ErrRdata + } + } + if ptr == 0 { + off1 = off + } + if len(s) == 0 { + s = []byte(".") + } + return string(s), off1, nil +} + +func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { + if len(txt) == 0 { + if offset >= len(msg) { + return offset, ErrBuf + } + msg[offset] = 0 + return offset, nil + } + var err error + for i := range txt { + if len(txt[i]) > len(tmp) { + return offset, ErrBuf + } + offset, err = packTxtString(txt[i], msg, offset, tmp) + if err != nil { + return offset, err + } + } + return offset, nil +} + +func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { + lenByteOffset := offset + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + offset++ + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else if bs[i] == 't' { + msg[offset] = '\t' + } else if bs[i] == 'r' { + msg[offset] = '\r' + } else if bs[i] == 'n' { + msg[offset] = '\n' + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + l := offset - lenByteOffset - 1 + if l > 255 { + return offset, &Error{err: "string exceeded 255 bytes in txt"} + } + msg[lenByteOffset] = byte(l) + return offset, nil +} + +func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + return 
offset, nil +} + +func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { + off = off0 + var s string + for off < len(msg) && err == nil { + s, off, err = unpackTxtString(msg, off) + if err == nil { + ss = append(ss, s) + } + } + return +} + +func unpackTxtString(msg []byte, offset int) (string, int, error) { + if offset+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + l := int(msg[offset]) + if offset+l+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[offset+1 : offset+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + case '\t': + s = append(s, `\t`...) + case '\r': + s = append(s, `\r`...) + case '\n': + s = append(s, `\n`...) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + offset += 1 + l + return string(s), offset, nil +} + +// Helpers for dealing with escaped bytes +func isDigit(b byte) bool { return b >= '0' && b <= '9' } + +func dddToByte(s []byte) byte { + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + +// Helper function for packing and unpacking +func intToBytes(i *big.Int, length int) []byte { + buf := i.Bytes() + if len(buf) < length { + b := make([]byte, length) + copy(b[length-len(buf):], buf) + return b + } + return buf +} + +// PackRR packs a resource record rr into msg[off:]. +// See PackDomainName for documentation about the compression. +func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if rr == nil { + return len(msg), &Error{err: "nil rr"} + } + + off1, err = rr.pack(msg, off, compression, compress) + if err != nil { + return len(msg), err + } + // TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well. + if rawSetRdlength(msg, off, off1) { + return off1, nil + } + return off, ErrRdata +} + +// UnpackRR unpacks msg[off:] into an RR. +func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { + h, off, msg, err := unpackHeader(msg, off) + if err != nil { + return nil, len(msg), err + } + end := off + int(h.Rdlength) + + if fn, known := typeToUnpack[h.Rrtype]; !known { + rr, off, err = unpackRFC3597(h, msg, off) + } else { + rr, off, err = fn(h, msg, off) + } + if off != end { + return &h, end, &Error{err: "bad rdlength"} + } + return rr, off, err +} + +// unpackRRslice unpacks msg[off:] into an []RR. 
+// If we cannot unpack the whole array, then it will return nil +func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { + var r RR + // Optimistically make dst be the length that was sent + dst := make([]RR, 0, l) + for i := 0; i < l; i++ { + off1 := off + r, off, err = UnpackRR(msg, off) + if err != nil { + off = len(msg) + break + } + // If offset does not increase anymore, l is a lie + if off1 == off { + l = i + break + } + dst = append(dst, r) + } + if err != nil && off == len(msg) { + dst = nil + } + return dst, off, err +} + +// Convert a MsgHdr to a string, with dig-like headers: +// +//;; opcode: QUERY, status: NOERROR, id: 48404 +// +//;; flags: qr aa rd ra; +func (h *MsgHdr) String() string { + if h == nil { + return " MsgHdr" + } + + s := ";; opcode: " + OpcodeToString[h.Opcode] + s += ", status: " + RcodeToString[h.Rcode] + s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" + + s += ";; flags:" + if h.Response { + s += " qr" + } + if h.Authoritative { + s += " aa" + } + if h.Truncated { + s += " tc" + } + if h.RecursionDesired { + s += " rd" + } + if h.RecursionAvailable { + s += " ra" + } + if h.Zero { // Hmm + s += " z" + } + if h.AuthenticatedData { + s += " ad" + } + if h.CheckingDisabled { + s += " cd" + } + + s += ";" + return s +} + +// Pack packs a Msg: it is converted to to wire format. +// If the dns.Compress is true the message will be in compressed wire format. +func (dns *Msg) Pack() (msg []byte, err error) { + return dns.PackBuffer(nil) +} + +// PackBuffer packs a Msg, using the given buffer buf. If buf is too small +// a new buffer is allocated. +func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { + // We use a similar function in tsig.go's stripTsig. + var ( + dh Header + compression map[string]int + ) + + if dns.Compress { + compression = make(map[string]int) // Compression pointer mappings + } + + if dns.Rcode < 0 || dns.Rcode > 0xFFF { + return nil, ErrRcode + } + if dns.Rcode > 0xF { + // Regular RCODE field is 4 bits + opt := dns.IsEdns0() + if opt == nil { + return nil, ErrExtendedRcode + } + opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) + dns.Rcode &= 0xF + } + + // Convert convenient Msg into wire-like Header. + dh.Id = dns.Id + dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) + if dns.Response { + dh.Bits |= _QR + } + if dns.Authoritative { + dh.Bits |= _AA + } + if dns.Truncated { + dh.Bits |= _TC + } + if dns.RecursionDesired { + dh.Bits |= _RD + } + if dns.RecursionAvailable { + dh.Bits |= _RA + } + if dns.Zero { + dh.Bits |= _Z + } + if dns.AuthenticatedData { + dh.Bits |= _AD + } + if dns.CheckingDisabled { + dh.Bits |= _CD + } + + // Prepare variable sized arrays. + question := dns.Question + answer := dns.Answer + ns := dns.Ns + extra := dns.Extra + + dh.Qdcount = uint16(len(question)) + dh.Ancount = uint16(len(answer)) + dh.Nscount = uint16(len(ns)) + dh.Arcount = uint16(len(extra)) + + // We need the uncompressed length here, because we first pack it and then compress it. + msg = buf + compress := dns.Compress + dns.Compress = false + if packLen := dns.Len() + 1; len(msg) < packLen { + msg = make([]byte, packLen) + } + dns.Compress = compress + + // Pack it in: header and then the pieces. 
+ off := 0 + off, err = dh.pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + for i := 0; i < len(question); i++ { + off, err = question[i].pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(answer); i++ { + off, err = PackRR(answer[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(ns); i++ { + off, err = PackRR(ns[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(extra); i++ { + off, err = PackRR(extra[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + return msg[:off], nil +} + +// Unpack unpacks a binary message to a Msg structure. +func (dns *Msg) Unpack(msg []byte) (err error) { + var ( + dh Header + off int + ) + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return err + } + if off == len(msg) { + return ErrTruncated + } + + dns.Id = dh.Id + dns.Response = (dh.Bits & _QR) != 0 + dns.Opcode = int(dh.Bits>>11) & 0xF + dns.Authoritative = (dh.Bits & _AA) != 0 + dns.Truncated = (dh.Bits & _TC) != 0 + dns.RecursionDesired = (dh.Bits & _RD) != 0 + dns.RecursionAvailable = (dh.Bits & _RA) != 0 + dns.Zero = (dh.Bits & _Z) != 0 + dns.AuthenticatedData = (dh.Bits & _AD) != 0 + dns.CheckingDisabled = (dh.Bits & _CD) != 0 + dns.Rcode = int(dh.Bits & 0xF) + + // Optimistically use the count given to us in the header + dns.Question = make([]Question, 0, int(dh.Qdcount)) + + for i := 0; i < int(dh.Qdcount); i++ { + off1 := off + var q Question + q, off, err = unpackQuestion(msg, off) + if err != nil { + // Even if Truncated is set, we only will set ErrTruncated if we + // actually got the questions + return err + } + if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! + dh.Qdcount = uint16(i) + break + } + dns.Question = append(dns.Question, q) + } + + dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) + // The header counts might have been wrong so we need to update it + dh.Ancount = uint16(len(dns.Answer)) + if err == nil { + dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Nscount = uint16(len(dns.Ns)) + if err == nil { + dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Arcount = uint16(len(dns.Extra)) + + if off != len(msg) { + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? + // println("dns: extra bytes in dns packet", off, "<", len(msg)) + } else if dns.Truncated { + // Whether we ran into a an error or not, we want to return that it + // was truncated + err = ErrTruncated + } + return err +} + +// Convert a complete message to a string with dig-like output. 
+func (dns *Msg) String() string { + if dns == nil { + return " MsgHdr" + } + s := dns.MsgHdr.String() + " " + s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " + s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + if len(dns.Question) > 0 { + s += "\n;; QUESTION SECTION:\n" + for i := 0; i < len(dns.Question); i++ { + s += dns.Question[i].String() + "\n" + } + } + if len(dns.Answer) > 0 { + s += "\n;; ANSWER SECTION:\n" + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] != nil { + s += dns.Answer[i].String() + "\n" + } + } + } + if len(dns.Ns) > 0 { + s += "\n;; AUTHORITY SECTION:\n" + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] != nil { + s += dns.Ns[i].String() + "\n" + } + } + } + if len(dns.Extra) > 0 { + s += "\n;; ADDITIONAL SECTION:\n" + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] != nil { + s += dns.Extra[i].String() + "\n" + } + } + } + return s +} + +// Len returns the message length when in (un)compressed wire format. +// If dns.Compress is true compression it is taken into account. Len() +// is provided to be a faster way to get the size of the resulting packet, +// than packing it, measuring the size and discarding the buffer. +func (dns *Msg) Len() int { + // We always return one more than needed. + l := 12 // Message header is always 12 bytes + var compression map[string]int + if dns.Compress { + compression = make(map[string]int) + } + for i := 0; i < len(dns.Question); i++ { + l += dns.Question[i].len() + if dns.Compress { + compressionLenHelper(compression, dns.Question[i].Name) + } + } + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] == nil { + continue + } + l += dns.Answer[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Answer[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Answer[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Answer[i]) + } + } + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] == nil { + continue + } + l += dns.Ns[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Ns[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Ns[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Ns[i]) + } + } + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] == nil { + continue + } + l += dns.Extra[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Extra[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Extra[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Extra[i]) + } + } + return l +} + +// Put the parts of the name in the compression map. +func compressionLenHelper(c map[string]int, s string) { + pref := "" + lbs := Split(s) + for j := len(lbs) - 1; j >= 0; j-- { + pref = s[lbs[j]:] + if _, ok := c[pref]; !ok { + c[pref] = len(pref) + } + } +} + +// Look for each part in the compression map and returns its length, +// keep on searching so we get the longest match. 
+func compressionLenSearch(c map[string]int, s string) (int, bool) { + off := 0 + end := false + if s == "" { // don't bork on bogus data + return 0, false + } + for { + if _, ok := c[s[off:]]; ok { + return len(s[off:]), true + } + if end { + break + } + off, end = NextLabel(s, off) + } + return 0, false +} + +// TODO(miek): should add all types, because the all can be *used* for compression. Autogenerate from msg_generate and put in zmsg.go +func compressionLenHelperType(c map[string]int, r RR) { + switch x := r.(type) { + case *NS: + compressionLenHelper(c, x.Ns) + case *MX: + compressionLenHelper(c, x.Mx) + case *CNAME: + compressionLenHelper(c, x.Target) + case *PTR: + compressionLenHelper(c, x.Ptr) + case *SOA: + compressionLenHelper(c, x.Ns) + compressionLenHelper(c, x.Mbox) + case *MB: + compressionLenHelper(c, x.Mb) + case *MG: + compressionLenHelper(c, x.Mg) + case *MR: + compressionLenHelper(c, x.Mr) + case *MF: + compressionLenHelper(c, x.Mf) + case *MD: + compressionLenHelper(c, x.Md) + case *RT: + compressionLenHelper(c, x.Host) + case *RP: + compressionLenHelper(c, x.Mbox) + compressionLenHelper(c, x.Txt) + case *MINFO: + compressionLenHelper(c, x.Rmail) + compressionLenHelper(c, x.Email) + case *AFSDB: + compressionLenHelper(c, x.Hostname) + case *SRV: + compressionLenHelper(c, x.Target) + case *NAPTR: + compressionLenHelper(c, x.Replacement) + case *RRSIG: + compressionLenHelper(c, x.SignerName) + case *NSEC: + compressionLenHelper(c, x.NextDomain) + // HIP? + } +} + +// Only search on compressing these types. +func compressionLenSearchType(c map[string]int, r RR) (int, bool) { + switch x := r.(type) { + case *NS: + return compressionLenSearch(c, x.Ns) + case *MX: + return compressionLenSearch(c, x.Mx) + case *CNAME: + return compressionLenSearch(c, x.Target) + case *DNAME: + return compressionLenSearch(c, x.Target) + case *PTR: + return compressionLenSearch(c, x.Ptr) + case *SOA: + k, ok := compressionLenSearch(c, x.Ns) + k1, ok1 := compressionLenSearch(c, x.Mbox) + if !ok && !ok1 { + return 0, false + } + return k + k1, true + case *MB: + return compressionLenSearch(c, x.Mb) + case *MG: + return compressionLenSearch(c, x.Mg) + case *MR: + return compressionLenSearch(c, x.Mr) + case *MF: + return compressionLenSearch(c, x.Mf) + case *MD: + return compressionLenSearch(c, x.Md) + case *RT: + return compressionLenSearch(c, x.Host) + case *MINFO: + k, ok := compressionLenSearch(c, x.Rmail) + k1, ok1 := compressionLenSearch(c, x.Email) + if !ok && !ok1 { + return 0, false + } + return k + k1, true + case *AFSDB: + return compressionLenSearch(c, x.Hostname) + } + return 0, false +} + +// Copy returns a new RR which is a deep-copy of r. +func Copy(r RR) RR { r1 := r.copy(); return r1 } + +// Len returns the length (in octets) of the uncompressed RR in wire format. +func Len(r RR) int { return r.len() } + +// Copy returns a new *Msg which is a deep-copy of dns. +func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } + +// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
+func (dns *Msg) CopyTo(r1 *Msg) *Msg { + r1.MsgHdr = dns.MsgHdr + r1.Compress = dns.Compress + + if len(dns.Question) > 0 { + r1.Question = make([]Question, len(dns.Question)) + copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + } + + rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) + var rri int + + if len(dns.Answer) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Answer); i++ { + rrArr[rri] = dns.Answer[i].copy() + rri++ + } + r1.Answer = rrArr[rrbegin:rri:rri] + } + + if len(dns.Ns) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Ns); i++ { + rrArr[rri] = dns.Ns[i].copy() + rri++ + } + r1.Ns = rrArr[rrbegin:rri:rri] + } + + if len(dns.Extra) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Extra); i++ { + rrArr[rri] = dns.Extra[i].copy() + rri++ + } + r1.Extra = rrArr[rrbegin:rri:rri] + } + + return r1 +} + +func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := PackDomainName(q.Name, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint16(q.Qtype, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(q.Qclass, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackQuestion(msg []byte, off int) (Question, int, error) { + var ( + q Question + err error + ) + q.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qtype, off, err = unpackUint16(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qclass, off, err = unpackUint16(msg, off) + if off == len(msg) { + return q, off, nil + } + return q, off, err +} + +func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := packUint16(dh.Id, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Bits, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Qdcount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Ancount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Nscount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Arcount, msg, off) + return off, err +} + +func unpackMsgHdr(msg []byte, off int) (Header, int, error) { + var ( + dh Header + err error + ) + dh.Id, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Bits, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Qdcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Ancount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Nscount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Arcount, off, err = unpackUint16(msg, off) + return dh, off, err +} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go new file mode 100644 index 000000000000..166b3af00c83 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_generate.go @@ -0,0 +1,337 @@ +//+build ignore + +// msg_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate pack/unpack methods based on the struct tags. 
The generated source is +// written to zmsg.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" +) + +var packageHdr = ` +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go + +package dns + +` + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + fmt.Fprint(b, "// pack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) + fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) +if err != nil { + return off, err +} +headerEnd := off +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return off, err +} +`) + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("off, err = packStringTxt(rr.%s, msg, off)\n") + case `dns:"opt"`: + o("off, err = packDataOpt(rr.%s, msg, off)\n") + case `dns:"nsec"`: + o("off, err = packDataNsec(rr.%s, msg, off)\n") + case `dns:"domain-name"`: + o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: // ignored + case st.Tag(i) == `dns:"cdomain-name"`: + fallthrough + case st.Tag(i) == `dns:"domain-name"`: + o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") + case st.Tag(i) == `dns:"a"`: + o("off, err = packDataA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"aaaa"`: + o("off, err = packDataAAAA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"uint48"`: + o("off, err = packUint48(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"txt"`: + o("off, err = packString(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 + fallthrough + case st.Tag(i) == `dns:"base32"`: + o("off, err 
= packStringBase32(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("off, err = packStringBase64(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("off, err = packStringHex(rr.%s, msg, off)\n") + + case st.Tag(i) == `dns:"octet"`: + o("off, err = packStringOctet(rr.%s, msg, off)\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("off, err = packUint8(rr.%s, msg, off)\n") + case types.Uint16: + o("off, err = packUint16(rr.%s, msg, off)\n") + case types.Uint32: + o("off, err = packUint32(rr.%s, msg, off)\n") + case types.Uint64: + o("off, err = packUint64(rr.%s, msg, off)\n") + case types.String: + o("off, err = packString(rr.%s, msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + // We have packed everything, only now we know the rdlength of this RR + fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off- headerEnd)") + fmt.Fprintln(b, "return off, nil }\n") + } + + fmt.Fprint(b, "// unpack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) + fmt.Fprintf(b, "rr := new(%s)\n", name) + fmt.Fprint(b, "rr.Hdr = h\n") + fmt.Fprint(b, `if noRdata(h) { +return rr, off, nil + } +var err error +rdStart := off +_ = rdStart + +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + } + + // size-* are special, because they reference a struct member we should use for the length. 
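+		// For example, a tag of the form `dns:"size-base32:SaltLength"` means the
+		// base32 field's length is taken from the SaltLength struct member.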
+ if strings.HasPrefix(st.Tag(i), `dns:"size-`) { + structMember := structMember(st.Tag(i)) + structTag := structTag(st.Tag(i)) + switch structTag { + case "hex": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base32": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base64": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + continue + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("rr.%s, off, err = unpackStringTxt(msg, off)\n") + case `dns:"opt"`: + o("rr.%s, off, err = unpackDataOpt(msg, off)\n") + case `dns:"nsec"`: + o("rr.%s, off, err = unpackDataNsec(msg, off)\n") + case `dns:"domain-name"`: + o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"cdomain-name"`: + fallthrough + case `dns:"domain-name"`: + o("rr.%s, off, err = UnpackDomainName(msg, off)\n") + case `dns:"a"`: + o("rr.%s, off, err = unpackDataA(msg, off)\n") + case `dns:"aaaa"`: + o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") + case `dns:"uint48"`: + o("rr.%s, off, err = unpackUint48(msg, off)\n") + case `dns:"txt"`: + o("rr.%s, off, err = unpackString(msg, off)\n") + case `dns:"base32"`: + o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"base64"`: + o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"hex"`: + o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"octet"`: + o("rr.%s, off, err = unpackStringOctet(msg, off)\n") + case "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("rr.%s, off, err = unpackUint8(msg, off)\n") + case types.Uint16: + o("rr.%s, off, err = unpackUint16(msg, off)\n") + case types.Uint32: + o("rr.%s, off, err = unpackUint32(msg, off)\n") + case types.Uint64: + o("rr.%s, off, err = unpackUint64(msg, off)\n") + case types.String: + o("rr.%s, off, err = unpackString(msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + // If we've hit len(msg) we return without error. + if i < st.NumFields()-1 { + fmt.Fprintf(b, `if off == len(msg) { +return rr, off, nil + } +`) + } + } + fmt.Fprintf(b, "return rr, off, err }\n\n") + } + // Generate typeToUnpack map + fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") + for _, name := range namedTypes { + if name == "RFC3597" { + continue + } + fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) + } + fmt.Fprintln(b, "}\n") + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("zmsg.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. 
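+// For the tag in the example above it returns "SaltLength": the tag is split
+// on ':' and the trailing quote is stripped from the last field.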
+func structMember(s string) string { + fields := strings.Split(s, ":") + if len(fields) == 0 { + return "" + } + f := fields[len(fields)-1] + // f should have a closing " + if len(f) > 1 { + return f[:len(f)-1] + } + return f +} + +// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. +func structTag(s string) string { + fields := strings.Split(s, ":") + if len(fields) < 2 { + return "" + } + return fields[1][len("\"size-"):] +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go new file mode 100644 index 000000000000..e7a9500cc074 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -0,0 +1,630 @@ +package dns + +import ( + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "net" + "strconv" +) + +// helper functions called from the generated zmsg.go + +// These function are named after the tag to help pack/unpack, if there is no tag it is the name +// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or +// packDataDomainName. + +func unpackDataA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv4len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking a"} + } + a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) + off += net.IPv4len + return a, off, nil +} + +func packDataA(a net.IP, msg []byte, off int) (int, error) { + // It must be a slice of 4, even if it is 16, we encode only the first 4 + if off+net.IPv4len > len(msg) { + return len(msg), &Error{err: "overflow packing a"} + } + switch len(a) { + case net.IPv4len, net.IPv6len: + copy(msg[off:], a.To4()) + off += net.IPv4len + case 0: + // Allowed, for dynamic updates. + default: + return len(msg), &Error{err: "overflow packing a"} + } + return off, nil +} + +func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv6len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking aaaa"} + } + aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) + off += net.IPv6len + return aaaa, off, nil +} + +func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { + if off+net.IPv6len > len(msg) { + return len(msg), &Error{err: "overflow packing aaaa"} + } + + switch len(aaaa) { + case net.IPv6len: + copy(msg[off:], aaaa) + off += net.IPv6len + case 0: + // Allowed, dynamic updates. + default: + return len(msg), &Error{err: "overflow packing aaaa"} + } + return off, nil +} + +// unpackHeader unpacks an RR header, returning the offset to the end of the header and a +// re-sliced msg according to the expected length of the RR. 
+func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { + hdr := RR_Header{} + if off == len(msg) { + return hdr, off, msg, nil + } + + hdr.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rrtype, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Class, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Ttl, off, err = unpackUint32(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rdlength, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) + return hdr, off, msg, nil +} + +// pack packs an RR header, returning the offset to the end of the header. +// See PackDomainName for documentation about the compression. +func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if off == len(msg) { + return off, nil + } + + off, err = PackDomainName(hdr.Name, msg, off, compression, compress) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rrtype, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Class, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint32(hdr.Ttl, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rdlength, msg, off) + if err != nil { + return len(msg), err + } + return off, nil +} + +// helper helper functions. + +// truncateMsgFromRdLength truncates msg to match the expected length of the RR. +// Returns an error if msg is smaller than the expected size. +func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { + lenrd := off + int(rdlength) + if lenrd > len(msg) { + return msg, &Error{err: "overflowing header size"} + } + return msg[:lenrd], nil +} + +func fromBase32(s []byte) (buf []byte, err error) { + buflen := base32.HexEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base32.HexEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) } + +func fromBase64(s []byte) (buf []byte, err error) { + buflen := base64.StdEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base64.StdEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } + +// dynamicUpdate returns true if the Rdlength is zero. 
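+// Zero-length RDATA is legal in dynamic update messages (RFC 2136),
+// for instance when an update deletes an entire RRset.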
+func noRdata(h RR_Header) bool { return h.Rdlength == 0 } + +func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { + if off+1 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint8"} + } + return uint8(msg[off]), off + 1, nil +} + +func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { + if off+1 > len(msg) { + return len(msg), &Error{err: "overflow packing uint8"} + } + msg[off] = byte(i) + return off + 1, nil +} + +func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { + if off+2 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint16"} + } + return binary.BigEndian.Uint16(msg[off:]), off + 2, nil +} + +func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { + if off+2 > len(msg) { + return len(msg), &Error{err: "overflow packing uint16"} + } + binary.BigEndian.PutUint16(msg[off:], i) + return off + 2, nil +} + +func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { + if off+4 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint32"} + } + return binary.BigEndian.Uint32(msg[off:]), off + 4, nil +} + +func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { + if off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing uint32"} + } + binary.BigEndian.PutUint32(msg[off:], i) + return off + 4, nil +} + +func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { + if off+6 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} + } + // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) + i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | + uint64(msg[off+4])<<8 | uint64(msg[off+5]))) + off += 6 + return i, off, nil +} + +func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { + if off+6 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64 as uint48"} + } + msg[off] = byte(i >> 40) + msg[off+1] = byte(i >> 32) + msg[off+2] = byte(i >> 24) + msg[off+3] = byte(i >> 16) + msg[off+4] = byte(i >> 8) + msg[off+5] = byte(i) + off += 6 + return off, nil +} + +func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { + if off+8 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64"} + } + return binary.BigEndian.Uint64(msg[off:]), off + 8, nil +} + +func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { + if off+8 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64"} + } + binary.BigEndian.PutUint64(msg[off:], i) + off += 8 + return off, nil +} + +func unpackString(msg []byte, off int) (string, int, error) { + if off+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + l := int(msg[off]) + if off+l+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[off+1 : off+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + case '\t', '\r', '\n': + s = append(s, b) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + off += 1 + l + return string(s), off, nil +} + +func packString(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := 
packTxtString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackStringBase32(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base32"} + } + s := toBase32(msg[off:end]) + return s, end, nil +} + +func packStringBase32(s string, msg []byte, off int) (int, error) { + b32, err := fromBase32([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b32) > len(msg) { + return len(msg), &Error{err: "overflow packing base32"} + } + copy(msg[off:off+len(b32)], b32) + off += len(b32) + return off, nil +} + +func unpackStringBase64(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is base64 encoded value, so we don't need an explicit length + // to be set. Thus far all RR's that have base64 encoded fields have those as their + // last one. What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base64"} + } + s := toBase64(msg[off:end]) + return s, end, nil +} + +func packStringBase64(s string, msg []byte, off int) (int, error) { + b64, err := fromBase64([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b64) > len(msg) { + return len(msg), &Error{err: "overflow packing base64"} + } + copy(msg[off:off+len(b64)], b64) + off += len(b64) + return off, nil +} + +func unpackStringHex(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is hex encoded value, so we don't need an explicit length + // to be set. NSEC and TSIG have hex fields with a length field. + // What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking hex"} + } + + s := hex.EncodeToString(msg[off:end]) + return s, end, nil +} + +func packStringHex(s string, msg []byte, off int) (int, error) { + h, err := hex.DecodeString(s) + if err != nil { + return len(msg), err + } + if off+(len(h)) > len(msg) { + return len(msg), &Error{err: "overflow packing hex"} + } + copy(msg[off:off+len(h)], h) + off += len(h) + return off, nil +} + +func unpackStringTxt(msg []byte, off int) ([]string, int, error) { + txt, off, err := unpackTxt(msg, off) + if err != nil { + return nil, len(msg), err + } + return txt, off, nil +} + +func packStringTxt(s []string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
+ off, err := packTxt(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { + var edns []EDNS0 +Option: + code := uint16(0) + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code = binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + switch code { + case EDNS0NSID: + e := new(EDNS0_NSID) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0SUBNET, EDNS0SUBNETDRAFT: + e := new(EDNS0_SUBNET) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + if code == EDNS0SUBNETDRAFT { + e.DraftOption = true + } + case EDNS0COOKIE: + e := new(EDNS0_COOKIE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0UL: + e := new(EDNS0_UL) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0LLQ: + e := new(EDNS0_LLQ) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DAU: + e := new(EDNS0_DAU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DHU: + e := new(EDNS0_DHU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0N3U: + e := new(EDNS0_N3U) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + default: + e := new(EDNS0_LOCAL) + e.Code = code + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + } + + if off < len(msg) { + goto Option + } + + return edns, off, nil +} + +func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { + for _, el := range options { + b, err := el.pack() + if err != nil || off+3 > len(msg) { + return len(msg), &Error{err: "overflow packing opt"} + } + binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code + binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length + off += 4 + if off+len(b) > len(msg) { + copy(msg[off:], b) + off = len(msg) + continue + } + // Actual data + copy(msg[off:off+len(b)], b) + off += len(b) + } + return off, nil +} + +func unpackStringOctet(msg []byte, off int) (string, int, error) { + s := string(msg[off:]) + return s, len(msg), nil +} + +func packStringOctet(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packOctetString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { + var nsec []uint16 + length, window, lastwindow := 0, 0, -1 + for off < len(msg) { + if off+2 > len(msg) { + return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} + } + window = int(msg[off]) + length = int(msg[off+1]) + off += 2 + if window <= 
lastwindow { + // RFC 4034: Blocks are present in the NSEC RR RDATA in + // increasing numerical order. + return nsec, len(msg), &Error{err: "out of order NSEC block"} + } + if length == 0 { + // RFC 4034: Blocks with no types present MUST NOT be included. + return nsec, len(msg), &Error{err: "empty NSEC block"} + } + if length > 32 { + return nsec, len(msg), &Error{err: "NSEC block too long"} + } + if off+length > len(msg) { + return nsec, len(msg), &Error{err: "overflowing NSEC block"} + } + + // Walk the bytes in the window and extract the type bits + for j := 0; j < length; j++ { + b := msg[off+j] + // Check the bits one by one, and set the type + if b&0x80 == 0x80 { + nsec = append(nsec, uint16(window*256+j*8+0)) + } + if b&0x40 == 0x40 { + nsec = append(nsec, uint16(window*256+j*8+1)) + } + if b&0x20 == 0x20 { + nsec = append(nsec, uint16(window*256+j*8+2)) + } + if b&0x10 == 0x10 { + nsec = append(nsec, uint16(window*256+j*8+3)) + } + if b&0x8 == 0x8 { + nsec = append(nsec, uint16(window*256+j*8+4)) + } + if b&0x4 == 0x4 { + nsec = append(nsec, uint16(window*256+j*8+5)) + } + if b&0x2 == 0x2 { + nsec = append(nsec, uint16(window*256+j*8+6)) + } + if b&0x1 == 0x1 { + nsec = append(nsec, uint16(window*256+j*8+7)) + } + } + off += length + lastwindow = window + } + return nsec, off, nil +} + +func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { + if len(bitmap) == 0 { + return off, nil + } + var lastwindow, lastlength uint16 + for j := 0; j < len(bitmap); j++ { + t := bitmap[j] + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + off += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + return len(msg), &Error{err: "nsec bits out of order"} + } + if off+2+int(length) > len(msg) { + return len(msg), &Error{err: "overflow packing nsec"} + } + // Setting the window # + msg[off] = byte(window) + // Setting the octets length + msg[off+1] = byte(length) + // Setting the bit value for the type in the right octet + msg[off+1+int(length)] |= byte(1 << (7 - (t % 8))) + lastwindow, lastlength = window, length + } + off += int(lastlength) + 2 + return off, nil +} + +func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { + var ( + servers []string + s string + err error + ) + if end > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking domain names"} + } + for off < end { + s, off, err = UnpackDomainName(msg, off) + if err != nil { + return servers, len(msg), err + } + servers = append(servers, s) + } + return servers, off, nil +} + +func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { + var err error + for j := 0; j < len(names); j++ { + off, err = PackDomainName(names[j], msg, off, compression, false && compress) + if err != nil { + return len(msg), err + } + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go new file mode 100644 index 000000000000..6f10f3e65bcc --- /dev/null +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -0,0 +1,119 @@ +package dns + +import ( + "crypto/sha1" + "hash" + "io" + "strings" +) + +type saltWireFmt struct { + Salt string `dns:"size-hex"` +} + +// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. 
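+// The digest is SHA-1 over the owner name in wire format with the salt
+// appended, re-hashed Iterations more times (salt appended each round),
+// and encoded in base32 with the extended hex alphabet (RFC 5155 section 5).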
+func HashName(label string, ha uint8, iter uint16, salt string) string { + saltwire := new(saltWireFmt) + saltwire.Salt = salt + wire := make([]byte, DefaultMsgSize) + n, err := packSaltWire(saltwire, wire) + if err != nil { + return "" + } + wire = wire[:n] + name := make([]byte, 255) + off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) + if err != nil { + return "" + } + name = name[:off] + var s hash.Hash + switch ha { + case SHA1: + s = sha1.New() + default: + return "" + } + + // k = 0 + name = append(name, wire...) + io.WriteString(s, string(name)) + nsec3 := s.Sum(nil) + // k > 0 + for k := uint16(0); k < iter; k++ { + s.Reset() + nsec3 = append(nsec3, wire...) + io.WriteString(s, string(nsec3)) + nsec3 = s.Sum(nil) + } + return toBase32(nsec3) +} + +// Denialer is an interface that should be implemented by types that are used to denial +// answers in DNSSEC. +type Denialer interface { + // Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3. + Cover(name string) bool + // Match will check if the ownername matches the (unhashed) name for this NSEC3 or NSEC3. + Match(name string) bool +} + +// Cover implements the Denialer interface. +func (rr *NSEC) Cover(name string) bool { + return true +} + +// Match implements the Denialer interface. +func (rr *NSEC) Match(name string) bool { + return true +} + +// Cover implements the Denialer interface. +func (rr *NSEC3) Cover(name string) bool { + // FIXME(miek): check if the zones match + // FIXME(miek): check if we're not dealing with parent nsec3 + hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + labels := Split(rr.Hdr.Name) + if len(labels) < 2 { + return false + } + hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot + if hash == rr.NextDomain { + return false // empty interval + } + if hash > rr.NextDomain { // last name, points to apex + // hname > hash + // hname > rr.NextDomain + // TODO(miek) + } + if hname <= hash { + return false + } + if hname >= rr.NextDomain { + return false + } + return true +} + +// Match implements the Denialer interface. +func (rr *NSEC3) Match(name string) bool { + // FIXME(miek): Check if we are in the same zone + hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + labels := Split(rr.Hdr.Name) + if len(labels) < 2 { + return false + } + hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the . + if hash == hname { + return true + } + return false +} + +func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) { + off, err := packStringHex(sw.Salt, msg, 0) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go new file mode 100644 index 000000000000..6b08e6e9592e --- /dev/null +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -0,0 +1,149 @@ +package dns + +import ( + "fmt" + "strings" +) + +// PrivateRdata is an interface used for implementing "Private Use" RR types, see +// RFC 6895. This allows one to experiment with new RR types, without requesting an +// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. +type PrivateRdata interface { + // String returns the text presentaton of the Rdata of the Private RR. + String() string + // Parse parses the Rdata of the private RR. + Parse([]string) error + // Pack is used when packing a private RR into a buffer. + Pack([]byte) (int, error) + // Unpack is used when unpacking a private RR from a buffer. 
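+	// It returns the number of octets of the buffer that were consumed.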
+ // TODO(miek): diff. signature than Pack, see edns0.go for instance. + Unpack([]byte) (int, error) + // Copy copies the Rdata. + Copy(PrivateRdata) error + // Len returns the length in octets of the Rdata. + Len() int +} + +// PrivateRR represents an RR that uses a PrivateRdata user-defined type. +// It mocks normal RRs and implements dns.RR interface. +type PrivateRR struct { + Hdr RR_Header + Data PrivateRdata +} + +func mkPrivateRR(rrtype uint16) *PrivateRR { + // Panics if RR is not an instance of PrivateRR. + rrfunc, ok := TypeToRR[rrtype] + if !ok { + panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype)) + } + + anyrr := rrfunc() + switch rr := anyrr.(type) { + case *PrivateRR: + return rr + } + panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) +} + +// Header return the RR header of r. +func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } + +func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } + +// Private len and copy parts to satisfy RR interface. +func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } +func (r *PrivateRR) copy() RR { + // make new RR like this: + rr := mkPrivateRR(r.Hdr.Rrtype) + newh := r.Hdr.copyHeader() + rr.Hdr = *newh + + err := r.Data.Copy(rr.Data) + if err != nil { + panic("dns: got value that could not be used to copy Private rdata") + } + return rr +} +func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := r.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + n, err := r.Data.Pack(msg[off:]) + if err != nil { + return len(msg), err + } + off += n + r.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// PrivateHandle registers a private resource record type. It requires +// string and numeric representation of private RR type and generator function as argument. +func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { + rtypestr = strings.ToUpper(rtypestr) + + TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } + TypeToString[rtype] = rtypestr + StringToType[rtypestr] = rtype + + typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) { + if noRdata(h) { + return &h, off, nil + } + var err error + + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + off1, err := rr.Data.Unpack(msg[off:]) + off += off1 + if err != nil { + return rr, off, err + } + return rr, off, err + } + + setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + var l lex + text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 + Fetch: + for { + // TODO(miek): we could also be returning _QUOTE, this might or might not + // be an issue (basically parsing TXT becomes hard) + switch l = <-c; l.value { + case zNewline, zEOF: + break Fetch + case zString: + text = append(text, l.token) + } + } + + err := rr.Data.Parse(text) + if err != nil { + return nil, &ParseError{f, err.Error(), l}, "" + } + + return rr, nil, "" + } + + typeToparserFunc[rtype] = parserFunc{setPrivateRR, true} +} + +// PrivateHandleRemove removes defenitions required to support private RR type. 
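Together, PrivateHandle and PrivateHandleRemove bracket the lifetime of a user-defined type. A minimal sketch, assuming a made-up rdata type MYRR registered on code point 65281; the type name and code point are illustrative, not part of this patch:

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

// MYRR carries a single free-form text field as its rdata (illustrative only).
type MYRR struct{ Txt string }

func (r *MYRR) String() string                  { return r.Txt }
func (r *MYRR) Parse(txt []string) error        { r.Txt = strings.Join(txt, " "); return nil }
func (r *MYRR) Pack(buf []byte) (int, error)    { return copy(buf, r.Txt), nil }
func (r *MYRR) Unpack(buf []byte) (int, error)  { r.Txt = string(buf); return len(buf), nil }
func (r *MYRR) Len() int                        { return len(r.Txt) }

func (r *MYRR) Copy(dest dns.PrivateRdata) error {
	d, ok := dest.(*MYRR)
	if !ok {
		return fmt.Errorf("dest is not a *MYRR")
	}
	d.Txt = r.Txt
	return nil
}

func main() {
	dns.PrivateHandle("MYRR", 65281, func() dns.PrivateRdata { return new(MYRR) })
	defer dns.PrivateHandleRemove(65281)

	// The registered name can now be used anywhere a built-in type name could.
	rr, err := dns.NewRR("example.org. 3600 IN MYRR hello world")
	fmt.Println(rr, err)
}

Only code points in the RFC 6895 private-use range (65280-65534) should be used for such experiments.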
+func PrivateHandleRemove(rtype uint16) { + rtypestr, ok := TypeToString[rtype] + if ok { + delete(TypeToRR, rtype) + delete(TypeToString, rtype) + delete(typeToparserFunc, rtype) + delete(StringToType, rtypestr) + delete(typeToUnpack, rtype) + } + return +} diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go new file mode 100644 index 000000000000..6e21fba7e1fb --- /dev/null +++ b/vendor/github.com/miekg/dns/rawmsg.go @@ -0,0 +1,49 @@ +package dns + +import "encoding/binary" + +// rawSetRdlength sets the rdlength in the header of +// the RR. The offset 'off' must be positioned at the +// start of the header of the RR, 'end' must be the +// end of the RR. +func rawSetRdlength(msg []byte, off, end int) bool { + l := len(msg) +Loop: + for { + if off+1 > l { + return false + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // End of the domainname + break Loop + } + if off+c > l { + return false + } + off += c + + case 0xC0: + // pointer, next byte included, ends domainname + off++ + break Loop + } + } + // The domainname has been seen, we at the start of the fixed part in the header. + // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length. + off += 2 + 2 + 4 + if off+2 > l { + return false + } + //off+1 is the end of the header, 'end' is the end of the rr + //so 'end' - 'off+2' is the length of the rdata + rdatalen := end - (off + 2) + if rdatalen > 0xFFFF { + return false + } + binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen)) + return true +} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go new file mode 100644 index 000000000000..099dac9486d5 --- /dev/null +++ b/vendor/github.com/miekg/dns/reverse.go @@ -0,0 +1,38 @@ +package dns + +// StringToType is the reverse of TypeToString, needed for string parsing. +var StringToType = reverseInt16(TypeToString) + +// StringToClass is the reverse of ClassToString, needed for string parsing. +var StringToClass = reverseInt16(ClassToString) + +// Map of opcodes strings. +var StringToOpcode = reverseInt(OpcodeToString) + +// Map of rcodes strings. +var StringToRcode = reverseInt(RcodeToString) + +// Reverse a map +func reverseInt8(m map[uint8]string) map[string]uint8 { + n := make(map[string]uint8, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt16(m map[uint16]string) map[string]uint16 { + n := make(map[string]uint16, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt(m map[int]string) map[string]int { + n := make(map[string]int, len(m)) + for u, s := range m { + n[s] = u + } + return n +} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go new file mode 100644 index 000000000000..b489f3f050bf --- /dev/null +++ b/vendor/github.com/miekg/dns/sanitize.go @@ -0,0 +1,84 @@ +package dns + +// Dedup removes identical RRs from rrs. It preserves the original ordering. +// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies +// rrs. +// m is used to store the RRs temporay. If it is nil a new map will be allocated. +func Dedup(rrs []RR, m map[string]RR) []RR { + if m == nil { + m = make(map[string]RR) + } + // Save the keys, so we don't have to call normalizedString twice. + keys := make([]*string, 0, len(rrs)) + + for _, r := range rrs { + key := normalizedString(r) + keys = append(keys, &key) + if _, ok := m[key]; ok { + // Shortest TTL wins. 
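// e.g. two copies of the same A record with TTLs 300 and 60 collapse into one RR that keeps TTL 60 (illustrative values).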
+ if m[key].Header().Ttl > r.Header().Ttl { + m[key].Header().Ttl = r.Header().Ttl + } + continue + } + + m[key] = r + } + // If the length of the result map equals the amount of RRs we got, + // it means they were all different. We can then just return the original rrset. + if len(m) == len(rrs) { + return rrs + } + + j := 0 + for i, r := range rrs { + // If keys[i] lives in the map, we should copy and remove it. + if _, ok := m[*keys[i]]; ok { + delete(m, *keys[i]) + rrs[j] = r + j++ + } + + if len(m) == 0 { + break + } + } + + return rrs[:j] +} + +// normalizedString returns a normalized string from r. The TTL +// is removed and the domain name is lowercased. We go from this: +// DomainNameTTLCLASSTYPERDATA to: +// lowercasenameCLASSTYPE... +func normalizedString(r RR) string { + // A string Go DNS makes has: domainnameTTL... + b := []byte(r.String()) + + // find the first non-escaped tab, then another, so we capture where the TTL lives. + esc := false + ttlStart, ttlEnd := 0, 0 + for i := 0; i < len(b) && ttlEnd == 0; i++ { + switch { + case b[i] == '\\': + esc = !esc + case b[i] == '\t' && !esc: + if ttlStart == 0 { + ttlStart = i + continue + } + if ttlEnd == 0 { + ttlEnd = i + } + case b[i] >= 'A' && b[i] <= 'Z' && !esc: + b[i] += 32 + default: + esc = false + } + } + + // remove TTL. + copy(b[ttlStart:], b[ttlEnd:]) + cut := ttlEnd - ttlStart + return string(b[:len(b)-cut]) +} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go new file mode 100644 index 000000000000..0e83797fb597 --- /dev/null +++ b/vendor/github.com/miekg/dns/scan.go @@ -0,0 +1,974 @@ +package dns + +import ( + "io" + "log" + "os" + "strconv" + "strings" +) + +type debugging bool + +const debug debugging = false + +func (d debugging) Printf(format string, args ...interface{}) { + if d { + log.Printf(format, args...) + } +} + +const maxTok = 2048 // Largest token we can return. +const maxUint16 = 1<<16 - 1 + +// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: +// * Add ownernames if they are left blank; +// * Suppress sequences of spaces; +// * Make each RR fit on one line (_NEWLINE is send as last) +// * Handle comments: ; +// * Handle braces - anywhere. +const ( + // Zonefile + zEOF = iota + zString + zBlank + zQuote + zNewline + zRrtpe + zOwner + zClass + zDirOrigin // $ORIGIN + zDirTtl // $TTL + zDirInclude // $INCLUDE + zDirGenerate // $GENERATE + + // Privatekey file + zValue + zKey + + zExpectOwnerDir // Ownername + zExpectOwnerBl // Whitespace after the ownername + zExpectAny // Expect rrtype, ttl or class + zExpectAnyNoClass // Expect rrtype or ttl + zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS + zExpectAnyNoTtl // Expect rrtype or class + zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL + zExpectRrtype // Expect rrtype + zExpectRrtypeBl // Whitespace BEFORE rrtype + zExpectRdata // The first element of the rdata + zExpectDirTtlBl // Space after directive $TTL + zExpectDirTtl // Directive $TTL + zExpectDirOriginBl // Space after directive $ORIGIN + zExpectDirOrigin // Directive $ORIGIN + zExpectDirIncludeBl // Space after directive $INCLUDE + zExpectDirInclude // Directive $INCLUDE + zExpectDirGenerate // Directive $GENERATE + zExpectDirGenerateBl // Space after directive $GENERATE +) + +// ParseError is a parsing error. It contains the parse error and the location in the io.Reader +// where the error occurred. 
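A minimal sketch of how such a failure reaches callers; the malformed record text is an illustrative assumption:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// "notattl" is neither a TTL, a class, nor an RR type, so parsing fails.
	_, err := dns.NewRR("example.org. notattl IN A 192.0.2.1")
	fmt.Println(err) // a *ParseError naming the offending token and its line and column
}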
+type ParseError struct { + file string + err string + lex lex +} + +func (e *ParseError) Error() (s string) { + if e.file != "" { + s = e.file + ": " + } + s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + + strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) + return +} + +type lex struct { + token string // text of the token + tokenUpper string // uppercase text of the token + length int // length of the token + err bool // when true, token text has lexer error + value uint8 // value: zString, _BLANK, etc. + line int // line in the file + column int // column in the file + torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar + comment string // any comment text seen +} + +// Token holds the token that are returned when a zone file is parsed. +type Token struct { + // The scanned resource record when error is not nil. + RR + // When an error occurred, this has the error specifics. + Error *ParseError + // A potential comment positioned after the RR and on the same line. + Comment string +} + +// NewRR reads the RR contained in the string s. Only the first RR is +// returned. If s contains no RR, return nil with no error. The class +// defaults to IN and TTL defaults to 3600. The full zone file syntax +// like $TTL, $ORIGIN, etc. is supported. All fields of the returned +// RR are set, except RR.Header().Rdlength which is set to 0. +func NewRR(s string) (RR, error) { + if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline + return ReadRR(strings.NewReader(s+"\n"), "") + } + return ReadRR(strings.NewReader(s), "") +} + +// ReadRR reads the RR contained in q. +// See NewRR for more documentation. +func ReadRR(q io.Reader, filename string) (RR, error) { + r := <-parseZoneHelper(q, ".", filename, 1) + if r == nil { + return nil, nil + } + + if r.Error != nil { + return nil, r.Error + } + return r.RR, nil +} + +// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the +// returned channel, which consist out the parsed RR, a potential comment or an error. +// If there is an error the RR is nil. The string file is only used +// in error reporting. The string origin is used as the initial origin, as +// if the file would start with: $ORIGIN origin . +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported. +// The channel t is closed by ParseZone when the end of r is reached. +// +// Basic usage pattern when reading from a string (z) containing the +// zone data: +// +// for x := range dns.ParseZone(strings.NewReader(z), "", "") { +// if x.Error != nil { +// // log.Println(x.Error) +// } else { +// // Do something with x.RR +// } +// } +// +// Comments specified after an RR (and on the same line!) are returned too: +// +// foo. IN A 10.0.0.1 ; this is a comment +// +// The text "; this is comment" is returned in Token.Comment. Comments inside the +// RR are discarded. Comments on a line by themselves are discarded too. 
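Parentheses let one record span several physical lines, and the lexer's brace handling folds it back onto a single logical line; a minimal sketch with illustrative SOA values:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR(`example.org. 3600 IN SOA ns1.example.org. hostmaster.example.org. (
	        2024010101 ; serial, comment is discarded
	        7200 3600 1209600 300 )`)
	fmt.Println(rr, err) // one SOA RR, despite the rdata spanning three lines
}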
+func ParseZone(r io.Reader, origin, file string) chan *Token { + return parseZoneHelper(r, origin, file, 10000) +} + +func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token { + t := make(chan *Token, chansize) + go parseZone(r, origin, file, t, 0) + return t +} + +func parseZone(r io.Reader, origin, f string, t chan *Token, include int) { + defer func() { + if include == 0 { + close(t) + } + }() + s := scanInit(r) + c := make(chan lex) + // Start the lexer + go zlexer(s, c) + // 6 possible beginnings of a line, _ is a space + // 0. zRRTYPE -> all omitted until the rrtype + // 1. zOwner _ zRrtype -> class/ttl omitted + // 2. zOwner _ zString _ zRrtype -> class omitted + // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class + // 4. zOwner _ zClass _ zRrtype -> ttl omitted + // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) + // After detecting these, we know the zRrtype so we can jump to functions + // handling the rdata for each of these types. + + if origin == "" { + origin = "." + } + origin = Fqdn(origin) + if _, ok := IsDomainName(origin); !ok { + t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} + return + } + + st := zExpectOwnerDir // initial state + var h RR_Header + var defttl uint32 = defaultTtl + var prevName string + for l := range c { + // Lexer spotted an error already + if l.err == true { + t <- &Token{Error: &ParseError{f, l.token, l}} + return + + } + switch st { + case zExpectOwnerDir: + // We can also expect a directive, like $TTL or $ORIGIN + h.Ttl = defttl + h.Class = ClassINET + switch l.value { + case zNewline: + st = zExpectOwnerDir + case zOwner: + h.Name = l.token + if l.token[0] == '@' { + h.Name = origin + prevName = h.Name + st = zExpectOwnerBl + break + } + if h.Name[l.length-1] != '.' { + h.Name = appendOrigin(h.Name, origin) + } + _, ok := IsDomainName(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "bad owner name", l}} + return + } + prevName = h.Name + st = zExpectOwnerBl + case zDirTtl: + st = zExpectDirTtlBl + case zDirOrigin: + st = zExpectDirOriginBl + case zDirInclude: + st = zExpectDirIncludeBl + case zDirGenerate: + st = zExpectDirGenerateBl + case zRrtpe: + h.Name = prevName + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Name = prevName + h.Class = l.torc + st = zExpectAnyNoClassBl + case zBlank: + // Discard, can happen when there is nothing on the + // line except the RR type + case zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // Don't about the defttl, we should take the $TTL value + // defttl = ttl + st = zExpectAnyNoTtlBl + + default: + t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} + return + } + case zExpectDirIncludeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}} + return + } + st = zExpectDirInclude + case zExpectDirInclude: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}} + return + } + neworigin := origin // There may be optionally a new origin set after the filename, if not use current one + l := <-c + switch l.value { + case zBlank: + l := <-c + if l.value == zString { + if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + // a new origin is specified. + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. 
endings + neworigin = l.token + "." + origin + } else { + neworigin = l.token + origin + } + } else { + neworigin = l.token + } + } + case zNewline, zEOF: + // Ok + default: + t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}} + return + } + // Start with the new file + r1, e1 := os.Open(l.token) + if e1 != nil { + t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}} + return + } + if include+1 > 7 { + t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} + return + } + parseZone(r1, l.token, neworigin, t, include+1) + st = zExpectOwnerDir + case zExpectDirTtlBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} + return + } + st = zExpectDirTtl + case zExpectDirTtl: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + return + } + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + defttl = ttl + st = zExpectOwnerDir + case zExpectDirOriginBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}} + return + } + st = zExpectDirOrigin + case zExpectDirOrigin: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + } + if _, ok := IsDomainName(l.token); !ok { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. endings + origin = l.token + "." + origin + } else { + origin = l.token + origin + } + } else { + origin = l.token + } + st = zExpectOwnerDir + case zExpectDirGenerateBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}} + return + } + st = zExpectDirGenerate + case zExpectDirGenerate: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}} + return + } + if errMsg := generate(l, c, t, origin); errMsg != "" { + t <- &Token{Error: &ParseError{f, errMsg, l}} + return + } + st = zExpectOwnerDir + case zExpectOwnerBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after owner", l}} + return + } + st = zExpectAny + case zExpectAny: + switch l.value { + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Class = l.torc + st = zExpectAnyNoClassBl + case zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // defttl = ttl // don't set the defttl here + st = zExpectAnyNoTtlBl + default: + t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} + return + } + case zExpectAnyNoClassBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before class", l}} + return + } + st = zExpectAnyNoClass + case zExpectAnyNoTtlBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} + return + } + st = zExpectAnyNoTtl + case zExpectAnyNoTtl: + switch l.value { + case zClass: + h.Class = l.torc + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}} + return + } + case zExpectAnyNoClass: + switch l.value { + case 
zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // defttl = ttl // don't set the def ttl anymore + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}} + return + } + case zExpectRrtypeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before RR type", l}} + return + } + st = zExpectRrtype + case zExpectRrtype: + if l.value != zRrtpe { + t <- &Token{Error: &ParseError{f, "unknown RR type", l}} + return + } + h.Rrtype = l.torc + st = zExpectRdata + case zExpectRdata: + r, e, c1 := setRR(h, c, origin, f) + if e != nil { + // If e.lex is nil than we have encounter a unknown RR type + // in that case we substitute our current lex token + if e.lex.token == "" && e.lex.value == 0 { + e.lex = l // Uh, dirty + } + t <- &Token{Error: e} + return + } + t <- &Token{RR: r, Comment: c1} + st = zExpectOwnerDir + } + } + // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this + // is not an error, because an empty zone file is still a zone file. +} + +// zlexer scans the sourcefile and returns tokens on the channel c. +func zlexer(s *scan, c chan lex) { + var l lex + str := make([]byte, maxTok) // Should be enough for any token + stri := 0 // Offset in str (0 means empty) + com := make([]byte, maxTok) // Hold comment text + comi := 0 + quote := false + escape := false + space := false + commt := false + rrtype := false + owner := true + brace := 0 + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + if stri >= maxTok { + l.token = "token length insufficient for parsing" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + if comi >= maxTok { + l.token = "comment length insufficient for parsing" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + + switch x { + case ' ', '\t': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if commt { + com[comi] = x + comi++ + break + } + if stri == 0 { + // Space directly in the beginning, handled in the grammar + } else if owner { + // If we have a string and its the first, make it an owner + l.value = zOwner + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + // escape $... 
start with a \ not a $, so this will work + switch l.tokenUpper { + case "$TTL": + l.value = zDirTtl + case "$ORIGIN": + l.value = zDirOrigin + case "$INCLUDE": + l.value = zDirInclude + case "$GENERATE": + l.value = zDirGenerate + } + debug.Printf("[7 %+v]", l.token) + c <- l + } else { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } else { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok := typeToInt(l.token) + if !ok { + l.token = "unknown RR type" + l.err = true + c <- l + return + } + l.value = zRrtpe + l.torc = t + } + } + if t, ok := StringToClass[l.tokenUpper]; ok { + l.value = zClass + l.torc = t + } else { + if strings.HasPrefix(l.tokenUpper, "CLASS") { + t, ok := classToInt(l.token) + if !ok { + l.token = "unknown class" + l.err = true + c <- l + return + } + l.value = zClass + l.torc = t + } + } + } + debug.Printf("[6 %+v]", l.token) + c <- l + } + stri = 0 + // I reverse space stuff here + if !space && !commt { + l.value = zBlank + l.token = " " + l.length = 1 + debug.Printf("[5 %+v]", l.token) + c <- l + } + owner = false + space = true + case ';': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if stri > 0 { + l.value = zString + l.token = string(str[:stri]) + l.length = stri + debug.Printf("[4 %+v]", l.token) + c <- l + stri = 0 + } + commt = true + com[comi] = ';' + comi++ + case '\r': + escape = false + if quote { + str[stri] = x + stri++ + break + } + // discard if outside of quotes + case '\n': + escape = false + // Escaped newline + if quote { + str[stri] = x + stri++ + break + } + // inside quotes this is legal + if commt { + // Reset a comment + commt = false + rrtype = false + stri = 0 + // If not in a brace this ends the comment AND the RR + if brace == 0 { + owner = true + owner = true + l.value = zNewline + l.token = "\n" + l.length = 1 + l.comment = string(com[:comi]) + debug.Printf("[3 %+v %+v]", l.token, l.comment) + c <- l + l.comment = "" + comi = 0 + break + } + com[comi] = ' ' // convert newline to space + comi++ + break + } + + if brace == 0 { + // If there is previous text, we should output it here + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } + } + debug.Printf("[2 %+v]", l.token) + c <- l + } + l.value = zNewline + l.token = "\n" + l.length = 1 + debug.Printf("[1 %+v]", l.token) + c <- l + stri = 0 + commt = false + rrtype = false + owner = true + comi = 0 + } + case '\\': + // comments do not get escaped chars, everything is copied + if commt { + com[comi] = x + comi++ + break + } + // something already escaped must be in string + if escape { + str[stri] = x + stri++ + escape = false + break + } + // something escaped outside of string gets added to string + str[stri] = x + stri++ + escape = true + case '"': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + space = false + // send previous gathered text and the quote + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.length = stri + + debug.Printf("[%+v]", l.token) + c <- l + stri = 0 + } + + // send quote itself as separate token + 
l.value = zQuote + l.token = "\"" + l.length = 1 + c <- l + quote = !quote + case '(', ')': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + if quote { + str[stri] = x + stri++ + break + } + switch x { + case ')': + brace-- + if brace < 0 { + l.token = "extra closing brace" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + case '(': + brace++ + } + default: + escape = false + if commt { + com[comi] = x + comi++ + break + } + str[stri] = x + stri++ + space = false + } + x, err = s.tokenText() + } + if stri > 0 { + // Send remainder + l.token = string(str[:stri]) + l.length = stri + l.value = zString + debug.Printf("[%+v]", l.token) + c <- l + } +} + +// Extract the class number from CLASSxx +func classToInt(token string) (uint16, bool) { + offset := 5 + if len(token) < offset+1 { + return 0, false + } + class, ok := strconv.Atoi(token[offset:]) + if ok != nil || class > maxUint16 { + return 0, false + } + return uint16(class), true +} + +// Extract the rr number from TYPExxx +func typeToInt(token string) (uint16, bool) { + offset := 4 + if len(token) < offset+1 { + return 0, false + } + typ, ok := strconv.Atoi(token[offset:]) + if ok != nil || typ > maxUint16 { + return 0, false + } + return uint16(typ), true +} + +// Parse things like 2w, 2m, etc, Return the time in seconds. +func stringToTtl(token string) (uint32, bool) { + s := uint32(0) + i := uint32(0) + for _, c := range token { + switch c { + case 's', 'S': + s += i + i = 0 + case 'm', 'M': + s += i * 60 + i = 0 + case 'h', 'H': + s += i * 60 * 60 + i = 0 + case 'd', 'D': + s += i * 60 * 60 * 24 + i = 0 + case 'w', 'W': + s += i * 60 * 60 * 24 * 7 + i = 0 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i *= 10 + i += uint32(c) - '0' + default: + return 0, false + } + } + return s + i, true +} + +// Parse LOC records' [.][mM] into a +// mantissa exponent format. Token should contain the entire +// string (i.e. no spaces allowed) +func stringToCm(token string) (e, m uint8, ok bool) { + if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { + token = token[0 : len(token)-1] + } + s := strings.SplitN(token, ".", 2) + var meters, cmeters, val int + var err error + switch len(s) { + case 2: + if cmeters, err = strconv.Atoi(s[1]); err != nil { + return + } + fallthrough + case 1: + if meters, err = strconv.Atoi(s[0]); err != nil { + return + } + case 0: + // huh? + return 0, 0, false + } + ok = true + if meters > 0 { + e = 2 + val = meters + } else { + e = 0 + val = cmeters + } + for val > 10 { + e++ + val /= 10 + } + if e > 9 { + ok = false + } + m = uint8(val) + return +} + +func appendOrigin(name, origin string) string { + if origin == "." { + return name + origin + } + return name + "." + origin +} + +// LOC record helper function +func locCheckNorth(token string, latitude uint32) (uint32, bool) { + switch token { + case "n", "N": + return LOC_EQUATOR + latitude, true + case "s", "S": + return LOC_EQUATOR - latitude, true + } + return latitude, false +} + +// LOC record helper function +func locCheckEast(token string, longitude uint32) (uint32, bool) { + switch token { + case "e", "E": + return LOC_EQUATOR + longitude, true + case "w", "W": + return LOC_EQUATOR - longitude, true + } + return longitude, false +} + +// "Eat" the rest of the "line". 
Return potential comments +func slurpRemainder(c chan lex, f string) (*ParseError, string) { + l := <-c + com := "" + switch l.value { + case zBlank: + l = <-c + com = l.comment + if l.value != zNewline && l.value != zEOF { + return &ParseError{f, "garbage after rdata", l}, "" + } + case zNewline: + com = l.comment + case zEOF: + default: + return &ParseError{f, "garbage after rdata", l}, "" + } + return nil, com +} + +// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" +// Used for NID and L64 record. +func stringToNodeID(l lex) (uint64, *ParseError) { + if len(l.token) < 19 { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + // There must be three colons at fixes postitions, if not its a parse error + if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + return u, nil +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go new file mode 100644 index 000000000000..e521dc063e95 --- /dev/null +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -0,0 +1,2143 @@ +package dns + +import ( + "encoding/base64" + "net" + "strconv" + "strings" +) + +type parserFunc struct { + // Func defines the function that parses the tokens and returns the RR + // or an error. The last string contains any comments in the line as + // they returned by the lexer as well. + Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string) + // Signals if the RR ending is of variable length, like TXT or records + // that have Hexadecimal or Base64 as their last element in the Rdata. Records + // that have a fixed ending or for instance A, AAAA, SOA and etc. + Variable bool +} + +// Parse the rdata of each rrtype. +// All data from the channel c is either zString or zBlank. +// After the rdata there may come a zBlank and then a zNewline +// or immediately a zNewline. If this is not the case we flag +// an *ParseError: garbage after rdata. 
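A minimal sketch of the "garbage after rdata" case described above; the record text is an illustrative assumption:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// The A rdata is complete after the address, so the trailing token is rejected.
	_, err := dns.NewRR("example.org. 3600 IN A 192.0.2.1 surplus")
	fmt.Println(err) // a *ParseError reporting garbage after rdata
}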
+func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + parserfunc, ok := typeToparserFunc[h.Rrtype] + if ok { + r, e, cm := parserfunc.Func(h, c, o, f) + if parserfunc.Variable { + return r, e, cm + } + if e != nil { + return nil, e, "" + } + e, cm = slurpRemainder(c, f) + if e != nil { + return nil, e, "" + } + return r, nil, cm + } + // RFC3957 RR (Unknown RR handling) + return setRFC3597(h, c, o, f) +} + +// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) +// or an error +func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { + s := "" + l := <-c // zString + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + s += l.token + case zBlank: // Ok + default: + return "", &ParseError{f, errstr, l}, "" + } + l = <-c + } + return s, nil, l.comment +} + +// A remainder of the rdata with embedded spaces, return the parsed string slice (sans the spaces) +// or an error +func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { + // Get the remaining data until we see a zNewline + quote := false + l := <-c + var s []string + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + switch l.value == zQuote { + case true: // A number of quoted string + s = make([]string, 0) + empty := true + for l.value != zNewline && l.value != zEOF { + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + empty = false + if len(l.token) > 255 { + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p, i := 0, 255 + for { + if i <= len(l.token) { + sx = append(sx, l.token[p:i]) + } else { + sx = append(sx, l.token[p:]) + break + + } + p, i = p+255, i+255 + } + s = append(s, sx...) + break + } + + s = append(s, l.token) + case zBlank: + if quote { + // zBlank can only be seen in between txt parts. + return nil, &ParseError{f, errstr, l}, "" + } + case zQuote: + if empty && quote { + s = append(s, "") + } + quote = !quote + empty = true + default: + return nil, &ParseError{f, errstr, l}, "" + } + l = <-c + } + if quote { + return nil, &ParseError{f, errstr, l}, "" + } + case false: // Unquoted text record + s = make([]string, 1) + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + s[0] += l.token + l = <-c + } + } + return s, nil, l.comment +} + +func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(A) + rr.Hdr = h + + l := <-c + if l.length == 0 { // Dynamic updates. + return rr, nil, "" + } + rr.A = net.ParseIP(l.token) + if rr.A == nil || l.err { + return nil, &ParseError{f, "bad A A", l}, "" + } + return rr, nil, "" +} + +func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AAAA) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + rr.AAAA = net.ParseIP(l.token) + if rr.AAAA == nil || l.err { + return nil, &ParseError{f, "bad AAAA AAAA", l}, "" + } + return rr, nil, "" +} + +func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NS) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Ns = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NS Ns", l}, "" + } + if rr.Ns[l.length-1] != '.' 
{ + rr.Ns = appendOrigin(rr.Ns, o) + } + return rr, nil, "" +} + +func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PTR Ptr", l}, "" + } + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } + return rr, nil, "" +} + +func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSAPPTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" + } + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } + return rr, nil, "" +} + +func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RP) + rr.Hdr = h + + l := <-c + rr.Mbox = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RP Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' { + rr.Mbox = appendOrigin(rr.Mbox, o) + } + } + <-c // zBlank + l = <-c + rr.Txt = l.token + if l.token == "@" { + rr.Txt = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RP Txt", l}, "" + } + if rr.Txt[l.length-1] != '.' { + rr.Txt = appendOrigin(rr.Txt, o) + } + return rr, nil, "" +} + +func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MR) + rr.Hdr = h + + l := <-c + rr.Mr = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MR Mr", l}, "" + } + if rr.Mr[l.length-1] != '.' { + rr.Mr = appendOrigin(rr.Mr, o) + } + return rr, nil, "" +} + +func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MB) + rr.Hdr = h + + l := <-c + rr.Mb = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mb = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MB Mb", l}, "" + } + if rr.Mb[l.length-1] != '.' { + rr.Mb = appendOrigin(rr.Mb, o) + } + return rr, nil, "" +} + +func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MG) + rr.Hdr = h + + l := <-c + rr.Mg = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mg = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MG Mg", l}, "" + } + if rr.Mg[l.length-1] != '.' { + rr.Mg = appendOrigin(rr.Mg, o) + } + return rr, nil, "" +} + +func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HINFO) + rr.Hdr = h + + chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) + if e != nil { + return nil, e, c1 + } + + if ln := len(chunks); ln == 0 { + return rr, nil, "" + } else if ln == 1 { + // Can we split it? 
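// e.g. an unquoted `PC-Intel Linux` arrives here as a single chunk and is split on whitespace into Cpu and Os (illustrative values).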
+ if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Cpu = chunks[0] + rr.Os = strings.Join(chunks[1:], " ") + + return rr, nil, "" +} + +func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MINFO) + rr.Hdr = h + + l := <-c + rr.Rmail = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Rmail = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MINFO Rmail", l}, "" + } + if rr.Rmail[l.length-1] != '.' { + rr.Rmail = appendOrigin(rr.Rmail, o) + } + } + <-c // zBlank + l = <-c + rr.Email = l.token + if l.token == "@" { + rr.Email = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MINFO Email", l}, "" + } + if rr.Email[l.length-1] != '.' { + rr.Email = appendOrigin(rr.Email, o) + } + return rr, nil, "" +} + +func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MF) + rr.Hdr = h + + l := <-c + rr.Mf = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mf = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MF Mf", l}, "" + } + if rr.Mf[l.length-1] != '.' { + rr.Mf = appendOrigin(rr.Mf, o) + } + return rr, nil, "" +} + +func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MD) + rr.Hdr = h + + l := <-c + rr.Md = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Md = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MD Md", l}, "" + } + if rr.Md[l.length-1] != '.' { + rr.Md = appendOrigin(rr.Md, o) + } + return rr, nil, "" +} + +func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad MX Pref", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Mx = l.token + if l.token == "@" { + rr.Mx = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MX Mx", l}, "" + } + if rr.Mx[l.length-1] != '.' { + rr.Mx = appendOrigin(rr.Mx, o) + } + return rr, nil, "" +} + +func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RT) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil { + return nil, &ParseError{f, "bad RT Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Host = l.token + if l.token == "@" { + rr.Host = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RT Host", l}, "" + } + if rr.Host[l.length-1] != '.' 
{ + rr.Host = appendOrigin(rr.Host, o) + } + return rr, nil, "" +} + +func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AFSDB) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" + } + rr.Subtype = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Hostname = l.token + if l.token == "@" { + rr.Hostname = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" + } + if rr.Hostname[l.length-1] != '.' { + rr.Hostname = appendOrigin(rr.Hostname, o) + } + return rr, nil, "" +} + +func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(X25) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.err { + return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" + } + rr.PSDNAddress = l.token + return rr, nil, "" +} + +func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(KX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad KX Pref", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Exchanger = l.token + if l.token == "@" { + rr.Exchanger = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad KX Exchanger", l}, "" + } + if rr.Exchanger[l.length-1] != '.' { + rr.Exchanger = appendOrigin(rr.Exchanger, o) + } + return rr, nil, "" +} + +func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(DNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SOA) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { + return rr, nil, "" + } + <-c // zBlank + if l.token == "@" { + rr.Ns = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Ns", l}, "" + } + if rr.Ns[l.length-1] != '.' { + rr.Ns = appendOrigin(rr.Ns, o) + } + } + + l = <-c + rr.Mbox = l.token + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' 
{ + rr.Mbox = appendOrigin(rr.Mbox, o) + } + } + <-c // zBlank + + var ( + v uint32 + ok bool + ) + for i := 0; i < 5; i++ { + l = <-c + if l.err { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if j, e := strconv.Atoi(l.token); e != nil { + if i == 0 { + // Serial should be a number + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if v, ok = stringToTtl(l.token); !ok { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + + } + } else { + v = uint32(j) + } + switch i { + case 0: + rr.Serial = v + <-c // zBlank + case 1: + rr.Refresh = v + <-c // zBlank + case 2: + rr.Retry = v + <-c // zBlank + case 3: + rr.Expire = v + <-c // zBlank + case 4: + rr.Minttl = v + } + } + return rr, nil, "" +} + +func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SRV) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Weight", l}, "" + } + rr.Weight = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Port", l}, "" + } + rr.Port = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Target = l.token + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SRV Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NAPTR) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Order", l}, "" + } + rr.Order = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Preference", l}, "" + } + rr.Preference = uint16(i) + // Flags + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Flags = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + } else if l.value == zQuote { + rr.Flags = "" + } else { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + + // Service + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Service = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + } else if l.value == zQuote { + rr.Service = "" + } else { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + + // Regexp + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Regexp = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + } else if l.value == zQuote { + rr.Regexp = "" + } else { + return nil, 
&ParseError{f, "bad NAPTR Regexp", l}, "" + } + // After quote no space?? + <-c // zBlank + l = <-c // zString + rr.Replacement = l.token + if l.token == "@" { + rr.Replacement = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" + } + if rr.Replacement[l.length-1] != '.' { + rr.Replacement = appendOrigin(rr.Replacement, o) + } + return rr, nil, "" +} + +func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TALINK) + rr.Hdr = h + + l := <-c + rr.PreviousName = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.PreviousName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" + } + if rr.PreviousName[l.length-1] != '.' { + rr.PreviousName = appendOrigin(rr.PreviousName, o) + } + } + <-c // zBlank + l = <-c + rr.NextName = l.token + if l.token == "@" { + rr.NextName = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad TALINK NextName", l}, "" + } + if rr.NextName[l.length-1] != '.' { + rr.NextName = appendOrigin(rr.NextName, o) + } + return rr, nil, "" +} + +func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LOC) + rr.Hdr = h + // Non zero defaults for LOC record, see RFC 1876, Section 3. + rr.HorizPre = 165 // 10000 + rr.VertPre = 162 // 10 + rr.Size = 18 // 1 + ok := false + // North + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude", l}, "" + } + rr.Latitude = 1000 * 60 * 60 * uint32(i) + + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" + } + rr.Latitude += 1000 * 60 * uint32(i) + + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" + } else { + rr.Latitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" + +East: + // East + <-c // zBlank + l = <-c + if i, e := strconv.Atoi(l.token); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude", l}, "" + } else { + rr.Longitude = 1000 * 60 * 60 * uint32(i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + if i, e := strconv.Atoi(l.token); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" + } else { + rr.Longitude += 1000 * 60 * uint32(i) + } + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" + } else { + rr.Longitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" + 
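// RFC 1876 encodes altitude in centimeters from a base 100,000 m below the WGS 84 spheroid,
// hence the *100 and +10000000 in the conversion below; the +0.5 rounds to the nearest centimeter.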
+Altitude: + <-c // zBlank + l = <-c + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } + if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { + l.token = l.token[0 : len(l.token)-1] + } + if i, e := strconv.ParseFloat(l.token, 32); e != nil { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } else { + rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) + } + + // And now optionally the other values + l = <-c + count := 0 + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + switch count { + case 0: // Size + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC Size", l}, "" + } + rr.Size = (e & 0x0f) | (m << 4 & 0xf0) + case 1: // HorizPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC HorizPre", l}, "" + } + rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0) + case 2: // VertPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC VertPre", l}, "" + } + rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0) + } + count++ + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" + } + l = <-c + } + return rr, nil, "" +} + +func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HIP) + rr.Hdr = h + + // HitLength is not represented + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" + } + rr.PublicKeyAlgorithm = uint8(i) + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP Hit", l}, "" + } + rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. + rr.HitLength = uint8(len(rr.Hit)) / 2 + + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP PublicKey", l}, "" + } + rr.PublicKey = l.token // This cannot contain spaces + rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) + + // RendezvousServers (if any) + l = <-c + var xs []string + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + if l.token == "@" { + xs = append(xs, o) + l = <-c + continue + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + if l.token[l.length-1] != '.' 
{ + l.token = appendOrigin(l.token, o) + } + xs = append(xs, l.token) + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + l = <-c + } + rr.RendezvousServers = xs + return rr, nil, l.comment +} + +func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CERT) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + if v, ok := StringToCertType[l.token]; ok { + rr.Type = v + } else if i, e := strconv.Atoi(l.token); e != nil { + return nil, &ParseError{f, "bad CERT Type", l}, "" + } else { + rr.Type = uint16(i) + } + <-c // zBlank + l = <-c // zString + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad CERT KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c // zString + if v, ok := StringToAlgorithm[l.token]; ok { + rr.Algorithm = v + } else if i, e := strconv.Atoi(l.token); e != nil { + return nil, &ParseError{f, "bad CERT Algorithm", l}, "" + } else { + rr.Algorithm = uint8(i) + } + s, e1, c1 := endingToString(c, "bad CERT Certificate", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(OPENPGPKEY) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) + if e != nil { + return nil, e, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setRRSIG(h, c, o, f) + if r != nil { + return &SIG{*r.(*RRSIG)}, e, s + } + return nil, e, s +} + +func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RRSIG) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + if t, ok := StringToType[l.tokenUpper]; !ok { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok = typeToInt(l.tokenUpper) + if !ok { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + rr.TypeCovered = t + } else { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + } else { + rr.TypeCovered = t + } + <-c // zBlank + l = <-c + i, err := strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, err = strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Labels", l}, "" + } + rr.Labels = uint8(i) + <-c // zBlank + l = <-c + i, err = strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" + } + rr.OrigTtl = uint32(i) + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + // Try to see if all numeric and use it as epoch + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + // TODO(miek): error out on > MAX_UINT32, same below + rr.Expiration = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" + } + } else { + rr.Expiration = i + } + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + rr.Inception = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Inception", l}, "" + } + } else { + rr.Inception = i + } + <-c // zBlank + l = <-c + i, err = strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l 
= <-c + rr.SignerName = l.token + if l.token == "@" { + rr.SignerName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" + } + if rr.SignerName[l.length-1] != '.' { + rr.SignerName = appendOrigin(rr.SignerName, o) + } + } + s, e, c1 := endingToString(c, "bad RRSIG Signature", f) + if e != nil { + return nil, e, c1 + } + rr.Signature = s + return rr, nil, c1 +} + +func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC) + rr.Hdr = h + + l := <-c + rr.NextDomain = l.token + if l.length == 0 { + return rr, nil, l.comment + } + if l.token == "@" { + rr.NextDomain = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" + } + if rr.NextDomain[l.length-1] != '.' { + rr.NextDomain = appendOrigin(rr.NextDomain, o) + } + } + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" + } + rr.SaltLength = uint8(len(l.token)) / 2 + rr.Salt = l.token + + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" + } + rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) + rr.NextDomain = l.token + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3PARAM) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { 
+ return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + rr.SaltLength = uint8(len(l.token)) + rr.Salt = l.token + return rr, nil, "" +} + +func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI48) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.length != 17 || l.err { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + addr := make([]byte, 12) + dash := 0 + for i := 0; i < 10; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + } + addr[10] = l.token[15] + addr[11] = l.token[16] + + i, e := strconv.ParseUint(string(addr), 16, 48) + if e != nil { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + rr.Address = i + return rr, nil, "" +} + +func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI64) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.length != 23 || l.err { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + addr := make([]byte, 16) + dash := 0 + for i := 0; i < 14; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + } + addr[14] = l.token[21] + addr[15] = l.token[22] + + i, e := strconv.ParseUint(string(addr), 16, 64) + if e != nil { + return nil, &ParseError{f, "bad EUI68 Address", l}, "" + } + rr.Address = uint64(i) + return rr, nil, "" +} + +func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SSHFP) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Type", l}, "" + } + rr.Type = uint8(i) + <-c // zBlank + s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) + if e1 != nil { + return nil, e1, c1 + } + rr.FingerPrint = s + return rr, nil, "" +} + +func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DNSKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "KEY") + if r != nil { + return &KEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") + return r, e, s +} + +func setCDNSKEY(h RR_Header, c chan lex, o, f string) 
(RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") + if r != nil { + return &CDNSKEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EID) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad EID Endpoint", f) + if e != nil { + return nil, e, c1 + } + rr.Endpoint = s + return rr, nil, c1 +} + +func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NIMLOC) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) + if e != nil { + return nil, e, c1 + } + rr.Locator = s + return rr, nil, c1 +} + +func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GPOS) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + _, e := strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Longitude", l}, "" + } + rr.Longitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Latitude", l}, "" + } + rr.Latitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Altitude", l}, "" + } + rr.Altitude = l.token + return rr, nil, "" +} + +func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DS) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e := strconv.Atoi(l.token); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DS") + return r, e, s +} + +func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DLV") + if r != nil { + return &DLV{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "CDS") + if r != nil 
{ + return &CDS{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e := strconv.Atoi(l.token); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad TA Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e, c1 := endingToString(c, "bad TA Digest", f) + if e != nil { + return nil, e.(*ParseError), c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TLSA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Usage", l}, "" + } + rr.Usage = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Selector", l}, "" + } + rr.Selector = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. different than e), because...??t + s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) + if e2 != nil { + return nil, e2, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RFC3597) + rr.Hdr = h + l := <-c + if l.token != "\\#" { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + <-c // zBlank + l = <-c + rdlength, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" + } + + s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) + if e1 != nil { + return nil, e1, c1 + } + if rdlength*2 != len(s) { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + rr.Rdata = s + return rr, nil, c1 +} + +func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SPF) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TXT) + rr.Hdr = h + + // no zBlank reading here, because all this rdata is TXT + s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +// identical to setTXT +func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NINFO) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) + if e != nil { + return nil, e, "" + } + rr.ZSData = s + return rr, nil, c1 +} + +func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(URI) + rr.Hdr = h + + l := <-c + if l.length == 0 { // Dynamic updates. 
+ return rr, nil, "" + } + + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Weight", l}, "" + } + rr.Weight = uint16(i) + + <-c // zBlank + s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) + if err != nil { + return nil, err, "" + } + if len(s) > 1 { + return nil, &ParseError{f, "bad URI Target", l}, "" + } + rr.Target = s[0] + return rr, nil, c1 +} + +func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + // awesome record to parse! + rr := new(DHCID) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad DHCID Digest", f) + if e != nil { + return nil, e, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NID) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NID Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.NodeID = u + return rr, nil, "" +} + +func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L32) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad L32 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Locator32 = net.ParseIP(l.token) + if rr.Locator32 == nil || l.err { + return nil, &ParseError{f, "bad L32 Locator", l}, "" + } + return rr, nil, "" +} + +func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LP) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad LP Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Fqdn = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Fqdn = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad LP Fqdn", l}, "" + } + if rr.Fqdn[l.length-1] != '.' 
{ + rr.Fqdn = appendOrigin(rr.Fqdn, o) + } + return rr, nil, "" +} + +func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L64) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad L64 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.Locator64 = u + return rr, nil, "" +} + +func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UID) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad UID Uid", l}, "" + } + rr.Uid = uint32(i) + return rr, nil, "" +} + +func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GID) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad GID Gid", l}, "" + } + rr.Gid = uint32(i) + return rr, nil, "" +} + +func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UINFO) + rr.Hdr = h + s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) + if e != nil { + return nil, e, "" + } + rr.Uinfo = s[0] // silently discard anything above + return rr, nil, c1 +} + +func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad PX Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Map822 = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Map822 = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PX Map822", l}, "" + } + if rr.Map822[l.length-1] != '.' { + rr.Map822 = appendOrigin(rr.Map822, o) + } + <-c // zBlank + l = <-c // zString + rr.Mapx400 = l.token + if l.token == "@" { + rr.Mapx400 = o + return rr, nil, "" + } + _, ok = IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PX Mapx400", l}, "" + } + if rr.Mapx400[l.length-1] != '.' 
{ + rr.Mapx400 = appendOrigin(rr.Mapx400, o) + } + return rr, nil, "" +} + +func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CAA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, err := strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad CAA Flag", l}, "" + } + rr.Flag = uint8(i) + + <-c // zBlank + l = <-c // zString + if l.value != zString { + return nil, &ParseError{f, "bad CAA Tag", l}, "" + } + rr.Tag = l.token + + <-c // zBlank + s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) + if e != nil { + return nil, e, "" + } + if len(s) > 1 { + return nil, &ParseError{f, "bad CAA Value", l}, "" + } + rr.Value = s[0] + return rr, nil, c1 +} + +var typeToparserFunc = map[uint16]parserFunc{ + TypeAAAA: {setAAAA, false}, + TypeAFSDB: {setAFSDB, false}, + TypeA: {setA, false}, + TypeCAA: {setCAA, true}, + TypeCDS: {setCDS, true}, + TypeCDNSKEY: {setCDNSKEY, true}, + TypeCERT: {setCERT, true}, + TypeCNAME: {setCNAME, false}, + TypeDHCID: {setDHCID, true}, + TypeDLV: {setDLV, true}, + TypeDNAME: {setDNAME, false}, + TypeKEY: {setKEY, true}, + TypeDNSKEY: {setDNSKEY, true}, + TypeDS: {setDS, true}, + TypeEID: {setEID, true}, + TypeEUI48: {setEUI48, false}, + TypeEUI64: {setEUI64, false}, + TypeGID: {setGID, false}, + TypeGPOS: {setGPOS, false}, + TypeHINFO: {setHINFO, true}, + TypeHIP: {setHIP, true}, + TypeKX: {setKX, false}, + TypeL32: {setL32, false}, + TypeL64: {setL64, false}, + TypeLOC: {setLOC, true}, + TypeLP: {setLP, false}, + TypeMB: {setMB, false}, + TypeMD: {setMD, false}, + TypeMF: {setMF, false}, + TypeMG: {setMG, false}, + TypeMINFO: {setMINFO, false}, + TypeMR: {setMR, false}, + TypeMX: {setMX, false}, + TypeNAPTR: {setNAPTR, false}, + TypeNID: {setNID, false}, + TypeNIMLOC: {setNIMLOC, true}, + TypeNINFO: {setNINFO, true}, + TypeNSAPPTR: {setNSAPPTR, false}, + TypeNSEC3PARAM: {setNSEC3PARAM, false}, + TypeNSEC3: {setNSEC3, true}, + TypeNSEC: {setNSEC, true}, + TypeNS: {setNS, false}, + TypeOPENPGPKEY: {setOPENPGPKEY, true}, + TypePTR: {setPTR, false}, + TypePX: {setPX, false}, + TypeSIG: {setSIG, true}, + TypeRKEY: {setRKEY, true}, + TypeRP: {setRP, false}, + TypeRRSIG: {setRRSIG, true}, + TypeRT: {setRT, false}, + TypeSOA: {setSOA, false}, + TypeSPF: {setSPF, true}, + TypeSRV: {setSRV, false}, + TypeSSHFP: {setSSHFP, true}, + TypeTALINK: {setTALINK, false}, + TypeTA: {setTA, true}, + TypeTLSA: {setTLSA, true}, + TypeTXT: {setTXT, true}, + TypeUID: {setUID, false}, + TypeUINFO: {setUINFO, true}, + TypeURI: {setURI, true}, + TypeX25: {setX25, false}, +} diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go new file mode 100644 index 000000000000..c29bc2f388e8 --- /dev/null +++ b/vendor/github.com/miekg/dns/scanner.go @@ -0,0 +1,43 @@ +package dns + +// Implement a simple scanner, return a byte stream from an io reader. + +import ( + "bufio" + "io" + "text/scanner" +) + +type scan struct { + src *bufio.Reader + position scanner.Position + eof bool // Have we just seen a eof +} + +func scanInit(r io.Reader) *scan { + s := new(scan) + s.src = bufio.NewReader(r) + s.position.Line = 1 + return s +} + +// tokenText returns the next byte from the input +func (s *scan) tokenText() (byte, error) { + c, err := s.src.ReadByte() + if err != nil { + return c, err + } + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. 
+ if s.eof == true { + s.position.Line++ + s.position.Column = 0 + s.eof = false + } + if c == '\n' { + s.eof = true + return c, nil + } + s.position.Column++ + return c, nil +} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go new file mode 100644 index 000000000000..2b4bff49f27b --- /dev/null +++ b/vendor/github.com/miekg/dns/server.go @@ -0,0 +1,732 @@ +// DNS server implementation. + +package dns + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "io" + "net" + "sync" + "time" +) + +// Maximum number of TCP queries before we close the socket. +const maxTCPQueries = 128 + +// Handler is implemented by any value that implements ServeDNS. +type Handler interface { + ServeDNS(w ResponseWriter, r *Msg) +} + +// A ResponseWriter interface is used by a DNS handler to +// construct a DNS response. +type ResponseWriter interface { + // LocalAddr returns the net.Addr of the server + LocalAddr() net.Addr + // RemoteAddr returns the net.Addr of the client that sent the current request. + RemoteAddr() net.Addr + // WriteMsg writes a reply back to the client. + WriteMsg(*Msg) error + // Write writes a raw buffer back to the client. + Write([]byte) (int, error) + // Close closes the connection. + Close() error + // TsigStatus returns the status of the Tsig. + TsigStatus() error + // TsigTimersOnly sets the tsig timers only boolean. + TsigTimersOnly(bool) + // Hijack lets the caller take over the connection. + // After a call to Hijack(), the DNS package will not do anything with the connection. + Hijack() +} + +type response struct { + hijacked bool // connection has been hijacked by handler + tsigStatus error + tsigTimersOnly bool + tsigRequestMAC string + tsigSecret map[string]string // the tsig secrets + udp *net.UDPConn // i/o connection if UDP was used + tcp net.Conn // i/o connection if TCP was used + udpSession *SessionUDP // oob data to get egress interface right + remoteAddr net.Addr // address of the client + writer Writer // writer to output the raw DNS bits +} + +// ServeMux is a DNS request multiplexer. It matches the +// zone name of each incoming request against a list of +// registered patterns and calls the handler for the pattern +// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning +// that queries for the DS record are redirected to the parent zone (if that +// is also registered), otherwise the child gets the query. +// ServeMux is also safe for concurrent access from multiple goroutines. +type ServeMux struct { + z map[string]Handler + m *sync.RWMutex +} + +// NewServeMux allocates and returns a new ServeMux. +func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} } + +// DefaultServeMux is the default ServeMux used by Serve. +var DefaultServeMux = NewServeMux() + +// The HandlerFunc type is an adapter to allow the use of +// ordinary functions as DNS handlers. If f is a function +// with the appropriate signature, HandlerFunc(f) is a +// Handler object that calls f. +type HandlerFunc func(ResponseWriter, *Msg) + +// ServeDNS calls f(w, r). +func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { + f(w, r) +} + +// HandleFailed is a HandlerFunc that returns SERVFAIL for every request it gets.
+func HandleFailed(w ResponseWriter, r *Msg) { + m := new(Msg) + m.SetRcode(r, RcodeServerFailure) + // does not matter if this write fails + w.WriteMsg(m) +} + +func failedHandler() Handler { return HandlerFunc(HandleFailed) } + +// ListenAndServe starts a server on the specified address and network, invoking +// handler for incoming queries. +func ListenAndServe(addr string, network string, handler Handler) error { + server := &Server{Addr: addr, Net: network, Handler: handler} + return server.ListenAndServe() +} + +// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in +// http://golang.org/pkg/net/http/#ListenAndServeTLS +func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + server := &Server{ + Addr: addr, + Net: "tcp-tls", + TLSConfig: &config, + Handler: handler, + } + + return server.ListenAndServe() +} + +// ActivateAndServe activates a server with a listener from systemd; +// l and p should not both be non-nil. +// If both l and p are non-nil, only p will be used. +// Invoke handler for incoming queries. +func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { + server := &Server{Listener: l, PacketConn: p, Handler: handler} + return server.ActivateAndServe() +} + +func (mux *ServeMux) match(q string, t uint16) Handler { + mux.m.RLock() + defer mux.m.RUnlock() + var handler Handler + b := make([]byte, len(q)) // worst case, one label of length q + off := 0 + end := false + for { + l := len(q[off:]) + for i := 0; i < l; i++ { + b[i] = q[off+i] + if b[i] >= 'A' && b[i] <= 'Z' { + b[i] |= ('a' - 'A') + } + } + if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key + if t != TypeDS { + return h + } + // Continue for DS to see if we have a parent too, if so delegate to the parent + handler = h + } + off, end = NextLabel(q, off) + if end { + break + } + } + // Wildcard match, if we have found nothing try the root zone as a last resort. + if h, ok := mux.z["."]; ok { + return h + } + return handler +} + +// Handle adds a handler to the ServeMux for pattern. +func (mux *ServeMux) Handle(pattern string, handler Handler) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + mux.z[Fqdn(pattern)] = handler + mux.m.Unlock() +} + +// HandleFunc adds a handler function to the ServeMux for pattern. +func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + mux.Handle(pattern, HandlerFunc(handler)) +} + +// HandleRemove deregisters the handler specific for pattern from the ServeMux. +func (mux *ServeMux) HandleRemove(pattern string) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + delete(mux.z, Fqdn(pattern)) + mux.m.Unlock() +} + +// ServeDNS dispatches the request to the handler whose +// pattern most closely matches the request message. If DefaultServeMux +// is used the correct thing for DS queries is done: a possible parent +// is sought. +// If no handler is found a standard SERVFAIL message is returned. +// If the request message does not have exactly one question in the +// question section a SERVFAIL is returned, unless Unsafe is true.
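+// Handlers are typically registered beforehand with Handle or HandleFunc; as a minimal +// sketch (the zone "example.org." is only a placeholder): HandleFunc("example.org.", +// func(w ResponseWriter, r *Msg) { m := new(Msg); m.SetReply(r); w.WriteMsg(m) })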
+func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) { + var h Handler + if len(request.Question) < 1 { // allow more than one question + h = failedHandler() + } else { + if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil { + h = failedHandler() + } + } + h.ServeDNS(w, request) +} + +// Handle registers the handler with the given pattern +// in the DefaultServeMux. The documentation for +// ServeMux explains how patterns are matched. +func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } + +// HandleRemove deregisters the handle with the given pattern +// in the DefaultServeMux. +func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } + +// HandleFunc registers the handler function with the given pattern +// in the DefaultServeMux. +func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + DefaultServeMux.HandleFunc(pattern, handler) +} + +// Writer writes raw DNS messages; each call to Write should send an entire message. +type Writer interface { + io.Writer +} + +// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. +type Reader interface { + // ReadTCP reads a raw message from a TCP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) + // ReadUDP reads a raw message from a UDP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) +} + +// defaultReader is an adapter for the Server struct that implements the Reader interface +// using the readTCP and readUDP func of the embedded Server. +type defaultReader struct { + *Server +} + +func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + return dr.readTCP(conn, timeout) +} + +func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + return dr.readUDP(conn, timeout) +} + +// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. +// Implementations should never return a nil Reader. +type DecorateReader func(Reader) Reader + +// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. +// Implementations should never return a nil Writer. +type DecorateWriter func(Writer) Writer + +// A Server defines parameters for running an DNS server. +type Server struct { + // Address to listen on, ":dns" if empty. + Addr string + // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one + Net string + // TCP Listener to use, this is to aid in systemd's socket activation. + Listener net.Listener + // TLS connection configuration + TLSConfig *tls.Config + // UDP "Listener" to use, this is to aid in systemd's socket activation. + PacketConn net.PacketConn + // Handler to invoke, dns.DefaultServeMux if nil. + Handler Handler + // Default buffer size to use to read incoming UDP messages. If not set + // it defaults to MinMsgSize (512 B). + UDPSize int + // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. + ReadTimeout time.Duration + // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. + WriteTimeout time.Duration + // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). 
+ IdleTimeout func() time.Duration + // Secret(s) for Tsig map[]. + TsigSecret map[string]string + // Unsafe instructs the server to disregard any sanity checks and directly hand the message to + // the handler. It will specifically not check if the query has the QR bit not set. + Unsafe bool + // If NotifyStartedFunc is set it is called once the server has started listening. + NotifyStartedFunc func() + // DecorateReader is optional, allows customization of the process that reads raw DNS messages. + DecorateReader DecorateReader + // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. + DecorateWriter DecorateWriter + + // Graceful shutdown handling + + inFlight sync.WaitGroup + + lock sync.RWMutex + started bool +} + +// ListenAndServe starts a nameserver on the configured address in *Server. +func (srv *Server) ListenAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + addr := srv.Addr + if addr == "" { + addr = ":domain" + } + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + switch srv.Net { + case "tcp", "tcp4", "tcp6": + a, err := net.ResolveTCPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenTCP(srv.Net, a) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "tcp-tls", "tcp4-tls", "tcp6-tls": + network := "tcp" + if srv.Net == "tcp4-tls" { + network = "tcp4" + } else if srv.Net == "tcp6" { + network = "tcp6" + } + + l, err := tls.Listen(network, addr, srv.TLSConfig) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "udp", "udp4", "udp6": + a, err := net.ResolveUDPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenUDP(srv.Net, a) + if err != nil { + return err + } + if e := setUDPSocketOptions(l); e != nil { + return e + } + srv.PacketConn = l + srv.started = true + srv.lock.Unlock() + err = srv.serveUDP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + } + return &Error{err: "bad network"} +} + +// ActivateAndServe starts a nameserver with the PacketConn or Listener +// configured in *Server. Its main use is to start a server from systemd. +func (srv *Server) ActivateAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + pConn := srv.PacketConn + l := srv.Listener + if pConn != nil { + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + if t, ok := pConn.(*net.UDPConn); ok { + if e := setUDPSocketOptions(t); e != nil { + return e + } + srv.started = true + srv.lock.Unlock() + e := srv.serveUDP(t) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + } + if l != nil { + srv.started = true + srv.lock.Unlock() + e := srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + return &Error{err: "bad listeners"} +} + +// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and +// ActivateAndServe will return. All in progress queries are completed before the server +// is taken down. If the Shutdown is taking longer than the reading timeout an error +// is returned. 
+func (srv *Server) Shutdown() error { + srv.lock.Lock() + if !srv.started { + srv.lock.Unlock() + return &Error{err: "server not started"} + } + srv.started = false + srv.lock.Unlock() + + if srv.PacketConn != nil { + srv.PacketConn.Close() + } + if srv.Listener != nil { + srv.Listener.Close() + } + + fin := make(chan bool) + go func() { + srv.inFlight.Wait() + fin <- true + }() + + select { + case <-time.After(srv.getReadTimeout()): + return &Error{err: "server shutdown is pending"} + case <-fin: + return nil + } +} + +// getReadTimeout is a helper func to use system timeout if server did not intend to change it. +func (srv *Server) getReadTimeout() time.Duration { + rtimeout := dnsTimeout + if srv.ReadTimeout != 0 { + rtimeout = srv.ReadTimeout + } + return rtimeout +} + +// serveTCP starts a TCP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveTCP(l net.Listener) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + rw, err := l.Accept() + if err != nil { + if neterr, ok := err.(net.Error); ok && neterr.Temporary() { + continue + } + return err + } + m, err := reader.ReadTCP(rw, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + continue + } + srv.inFlight.Add(1) + go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw) + } +} + +// serveUDP starts a UDP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveUDP(l *net.UDPConn) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + m, s, err := reader.ReadUDP(l, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + continue + } + srv.inFlight.Add(1) + go srv.serve(s.RemoteAddr(), handler, m, l, s, nil) + } +} + +// Serve a new connection. 
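+// serve handles a single request: a is the client address, h the handler to invoke, m the +// raw request bytes; u and s are non-nil when the query arrived over UDP, t when it +// arrived over TCP. For TCP connections it keeps reading follow-up queries on the same +// connection until maxTCPQueries is exceeded or a read fails.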
+func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) { + defer srv.inFlight.Done() + + w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s} + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + q := 0 // counter for the amount of TCP queries we get + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } +Redo: + req := new(Msg) + err := req.Unpack(m) + if err != nil { // Send a FormatError back + x := new(Msg) + x.SetRcodeFormatError(req) + w.WriteMsg(x) + goto Exit + } + if !srv.Unsafe && req.Response { + goto Exit + } + + w.tsigStatus = nil + if w.tsigSecret != nil { + if t := req.IsTsig(); t != nil { + secret := t.Hdr.Name + if _, ok := w.tsigSecret[secret]; !ok { + w.tsigStatus = ErrKeyAlg + } + w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false) + w.tsigTimersOnly = false + w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC + } + } + h.ServeDNS(w, req) // Writes back to the client + +Exit: + if w.tcp == nil { + return + } + // TODO(miek): make this number configurable? + if q > maxTCPQueries { // close socket after this many queries + w.Close() + return + } + + if w.hijacked { + return // client calls Close() + } + if u != nil { // UDP, "close" and return + w.Close() + return + } + idleTimeout := tcpIdleTimeout + if srv.IdleTimeout != nil { + idleTimeout = srv.IdleTimeout() + } + m, err = reader.ReadTCP(w.tcp, idleTimeout) + if err == nil { + q++ + goto Redo + } + w.Close() + return +} + +func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + l := make([]byte, 2) + n, err := conn.Read(l) + if err != nil || n != 2 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + length := binary.BigEndian.Uint16(l) + if length == 0 { + return nil, ErrShortRead + } + m := make([]byte, int(length)) + n, err = conn.Read(m[:int(length)]) + if err != nil || n == 0 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + i := n + for i < int(length) { + j, err := conn.Read(m[i:int(length)]) + if err != nil { + return nil, err + } + i += j + } + n = i + m = m[:n] + return m, nil +} + +func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + m := make([]byte, srv.UDPSize) + n, s, err := ReadFromSessionUDP(conn, m) + if err != nil || n == 0 { + if err != nil { + return nil, nil, err + } + return nil, nil, ErrShortRead + } + m = m[:n] + return m, s, nil +} + +// WriteMsg implements the ResponseWriter.WriteMsg method. +func (w *response) WriteMsg(m *Msg) (err error) { + var data []byte + if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) + if t := m.IsTsig(); t != nil { + data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err + } + } + data, err = m.Pack() + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err +} + +// Write implements the ResponseWriter.Write method. 
+func (w *response) Write(m []byte) (int, error) { + switch { + case w.udp != nil: + n, err := WriteToSessionUDP(w.udp, m, w.udpSession) + return n, err + case w.tcp != nil: + lm := len(m) + if lm < 2 { + return 0, io.ErrShortBuffer + } + if lm > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, 2+lm) + binary.BigEndian.PutUint16(l, uint16(lm)) + m = append(l, m...) + + n, err := io.Copy(w.tcp, bytes.NewReader(m)) + return int(n), err + } + panic("not reached") +} + +// LocalAddr implements the ResponseWriter.LocalAddr method. +func (w *response) LocalAddr() net.Addr { + if w.tcp != nil { + return w.tcp.LocalAddr() + } + return w.udp.LocalAddr() +} + +// RemoteAddr implements the ResponseWriter.RemoteAddr method. +func (w *response) RemoteAddr() net.Addr { return w.remoteAddr } + +// TsigStatus implements the ResponseWriter.TsigStatus method. +func (w *response) TsigStatus() error { return w.tsigStatus } + +// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. +func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } + +// Hijack implements the ResponseWriter.Hijack method. +func (w *response) Hijack() { w.hijacked = true } + +// Close implements the ResponseWriter.Close method +func (w *response) Close() error { + // Can't close the udp conn, as that is actually the listener. + if w.tcp != nil { + e := w.tcp.Close() + w.tcp = nil + return e + } + return nil +} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go new file mode 100644 index 000000000000..2dce06af82f6 --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0.go @@ -0,0 +1,219 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "encoding/binary" + "math/big" + "strings" + "time" +) + +// Sign signs a dns.Msg. It fills the signature with the appropriate data. +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception +// and Expiration set. +func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { + if k == nil { + return nil, ErrPrivKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return nil, ErrKey + } + rr.Header().Rrtype = TypeSIG + rr.Header().Class = ClassANY + rr.Header().Ttl = 0 + rr.Header().Name = "." + rr.OrigTtl = 0 + rr.TypeCovered = 0 + rr.Labels = 0 + + buf := make([]byte, m.Len()+rr.len()) + mbuf, err := m.PackBuffer(buf) + if err != nil { + return nil, err + } + if &buf[0] != &mbuf[0] { + return nil, ErrBuf + } + off, err := PackRR(rr, buf, len(mbuf), nil, false) + if err != nil { + return nil, err + } + buf = buf[:off:cap(buf)] + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return nil, ErrAlg + } + + hasher := hash.New() + // Write SIG rdata + hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + // Write message + hasher.Write(buf[:len(mbuf)]) + + signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) + if err != nil { + return nil, err + } + + rr.Signature = toBase64(signature) + sig := string(signature) + + buf = append(buf, sig...) + if len(buf) > int(^uint16(0)) { + return nil, ErrBuf + } + // Adjust sig data length + rdoff := len(mbuf) + 1 + 2 + 2 + 4 + rdlen := binary.BigEndian.Uint16(buf[rdoff:]) + rdlen += uint16(len(sig)) + binary.BigEndian.PutUint16(buf[rdoff:], rdlen) + // Adjust additional count + adc := binary.BigEndian.Uint16(buf[10:]) + adc++ + binary.BigEndian.PutUint16(buf[10:], adc) + return buf, nil +} + +// Verify validates the message buf using the key k. 
+// It's assumed that buf is a valid message from which rr was unpacked. +func (rr *SIG) Verify(k *KEY, buf []byte) error { + if k == nil { + return ErrKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return ErrAlg + } + hasher := hash.New() + + buflen := len(buf) + qdc := binary.BigEndian.Uint16(buf[4:]) + anc := binary.BigEndian.Uint16(buf[6:]) + auc := binary.BigEndian.Uint16(buf[8:]) + adc := binary.BigEndian.Uint16(buf[10:]) + offset := 12 + var err error + for i := uint16(0); i < qdc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type and Class + offset += 2 + 2 + } + for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type, Class and TTL + offset += 2 + 2 + 4 + if offset+1 >= buflen { + continue + } + var rdlen uint16 + rdlen = binary.BigEndian.Uint16(buf[offset:]) + offset += 2 + offset += int(rdlen) + } + if offset >= buflen { + return &Error{err: "overflowing unpacking signed message"} + } + + // offset should be just prior to SIG + bodyend := offset + // owner name SHOULD be root + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip Type, Class, TTL, RDLen + offset += 2 + 2 + 4 + 2 + sigstart := offset + // Skip Type Covered, Algorithm, Labels, Original TTL + offset += 2 + 1 + 1 + 4 + if offset+4+4 >= buflen { + return &Error{err: "overflow unpacking signed message"} + } + expire := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + incept := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + now := uint32(time.Now().Unix()) + if now < incept || now > expire { + return ErrTime + } + // Skip key tag + offset += 2 + var signername string + signername, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // If key has come from the DNS name compression might + // have mangled the case of the name + if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { + return &Error{err: "signer name doesn't match key name"} + } + sigend := offset + hasher.Write(buf[sigstart:sigend]) + hasher.Write(buf[:10]) + hasher.Write([]byte{ + byte((adc - 1) << 8), + byte(adc - 1), + }) + hasher.Write(buf[12:bodyend]) + + hashed := hasher.Sum(nil) + sig := buf[sigend:] + switch k.Algorithm { + case DSA: + pk := k.publicKeyDSA() + sig = sig[1:] + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if dsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + case RSASHA1, RSASHA256, RSASHA512: + pk := k.publicKeyRSA() + if pk != nil { + return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + } + case ECDSAP256SHA256, ECDSAP384SHA384: + pk := k.publicKeyECDSA() + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if ecdsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + } + return ErrKeyAlg +} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go new file mode 100644 index 000000000000..9573c7d0b8c2 --- /dev/null +++ 
b/vendor/github.com/miekg/dns/singleinflight.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted for dns package usage by Miek Gieben. + +package dns + +import "sync" +import "time" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + val *Msg + rtt time.Duration + err error + dups int +} + +// singleflight represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type singleflight struct { + sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { + g.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.Unlock() + c.wg.Wait() + return c.val, c.rtt, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.Unlock() + + c.val, c.rtt, c.err = fn() + c.wg.Done() + + g.Lock() + delete(g.m, key) + g.Unlock() + + return c.val, c.rtt, c.err, c.dups > 0 +} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go new file mode 100644 index 000000000000..34fe6615aa2f --- /dev/null +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -0,0 +1,86 @@ +package dns + +import ( + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/hex" + "errors" + "io" + "net" + "strconv" +) + +// CertificateToDANE converts a certificate to a hex string as used in the TLSA record. +func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { + switch matchingType { + case 0: + switch selector { + case 0: + return hex.EncodeToString(cert.Raw), nil + case 1: + return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil + } + case 1: + h := sha256.New() + switch selector { + case 0: + io.WriteString(h, string(cert.Raw)) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) + return hex.EncodeToString(h.Sum(nil)), nil + } + case 2: + h := sha512.New() + switch selector { + case 0: + io.WriteString(h, string(cert.Raw)) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) + return hex.EncodeToString(h.Sum(nil)), nil + } + } + return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector") +} + +// Sign creates a TLSA record from an SSL certificate. +func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeTLSA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err + } + return nil +} + +// Verify verifies a TLSA record against an SSL certificate. If it is OK +// a nil error is returned. 
+func (r *TLSA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// TLSAName returns the ownername of a TLSA resource record as per the +// rules specified in RFC 6698, Section 3. +func TLSAName(name, service, network string) (string, error) { + if !IsFqdn(name) { + return "", ErrFqdn + } + p, err := net.LookupPort(network, service) + if err != nil { + return "", err + } + return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil +} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go new file mode 100644 index 000000000000..78365e1c5bac --- /dev/null +++ b/vendor/github.com/miekg/dns/tsig.go @@ -0,0 +1,384 @@ +package dns + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "encoding/hex" + "hash" + "io" + "strconv" + "strings" + "time" +) + +// HMAC hashing codes. These are transmitted as domain names. +const ( + HmacMD5 = "hmac-md5.sig-alg.reg.int." + HmacSHA1 = "hmac-sha1." + HmacSHA256 = "hmac-sha256." + HmacSHA512 = "hmac-sha512." +) + +// TSIG is the RR the holds the transaction signature of a message. +// See RFC 2845 and RFC 4635. +type TSIG struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` + OrigId uint16 + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TSIG has no official presentation format, but this will suffice. + +func (rr *TSIG) String() string { + s := "\n;; TSIG PSEUDOSECTION:\n" + s += rr.Hdr.String() + + " " + rr.Algorithm + + " " + tsigTimeToString(rr.TimeSigned) + + " " + strconv.Itoa(int(rr.Fudge)) + + " " + strconv.Itoa(int(rr.MACSize)) + + " " + strings.ToUpper(rr.MAC) + + " " + strconv.Itoa(int(rr.OrigId)) + + " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData + return s +} + +// The following values must be put in wireformat, so that the MAC can be calculated. +// RFC 2845, section 3.4.2. TSIG Variables. +type tsigWireFmt struct { + // From RR_Header + Name string `dns:"domain-name"` + Class uint16 + Ttl uint32 + // Rdata of the TSIG + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + // MACSize, MAC and OrigId excluded + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC +type macWireFmt struct { + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` +} + +// 3.3. Time values used in TSIG calculations +type timerWireFmt struct { + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 +} + +// TsigGenerate fills out the TSIG record attached to the message. +// The message should contain +// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), +// time fudge (defaults to 300 seconds) and the current time +// The TSIG MAC is saved in that Tsig RR. +// When TsigGenerate is called for the first time requestMAC is set to the empty string and +// timersOnly is false. +// If something goes wrong an error is returned, otherwise it is nil. 
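+// The stub TSIG RR is usually attached to the message beforehand with the package's +// SetTsig helper (defined elsewhere in this package, not shown in this hunk).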
+func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { + if m.IsTsig() == nil { + panic("dns: TSIG not last RR in additional") + } + // If we barf here, the caller is to blame + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return nil, "", err + } + + rr := m.Extra[len(m.Extra)-1].(*TSIG) + m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg + mbuf, err := m.Pack() + if err != nil { + return nil, "", err + } + buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) + + t := new(TSIG) + var h hash.Hash + switch strings.ToLower(rr.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, []byte(rawsecret)) + case HmacSHA1: + h = hmac.New(sha1.New, []byte(rawsecret)) + case HmacSHA256: + h = hmac.New(sha256.New, []byte(rawsecret)) + case HmacSHA512: + h = hmac.New(sha512.New, []byte(rawsecret)) + default: + return nil, "", ErrKeyAlg + } + io.WriteString(h, string(buf)) + t.MAC = hex.EncodeToString(h.Sum(nil)) + t.MACSize = uint16(len(t.MAC) / 2) // Size is half! + + t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} + t.Fudge = rr.Fudge + t.TimeSigned = rr.TimeSigned + t.Algorithm = rr.Algorithm + t.OrigId = m.Id + + tbuf := make([]byte, t.len()) + if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { + tbuf = tbuf[:off] // reset to actual size used + } else { + return nil, "", err + } + mbuf = append(mbuf, tbuf...) + // Update the ArCount directly in the buffer. + binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) + + return mbuf, t.MAC, nil +} + +// TsigVerify verifies the TSIG on a message. +// If the signature does not validate err contains the +// error, otherwise it is nil. +func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return err + } + // Strip the TSIG from the incoming msg + stripped, tsig, err := stripTsig(msg) + if err != nil { + return err + } + + msgMAC, err := hex.DecodeString(tsig.MAC) + if err != nil { + return err + } + + buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) + + // Fudge factor works both ways. A message can arrive before it was signed because + // of clock skew. + now := uint64(time.Now().Unix()) + ti := now - tsig.TimeSigned + if now < tsig.TimeSigned { + ti = tsig.TimeSigned - now + } + if uint64(tsig.Fudge) < ti { + return ErrTime + } + + var h hash.Hash + switch strings.ToLower(tsig.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return ErrKeyAlg + } + h.Write(buf) + if !hmac.Equal(h.Sum(nil), msgMAC) { + return ErrSig + } + return nil +} + +// Create a wiredata buffer for the MAC calculation. +func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { + var buf []byte + if rr.TimeSigned == 0 { + rr.TimeSigned = uint64(time.Now().Unix()) + } + if rr.Fudge == 0 { + rr.Fudge = 300 // Standard (RFC) default. 
+ } + + if requestMAC != "" { + m := new(macWireFmt) + m.MACSize = uint16(len(requestMAC) / 2) + m.MAC = requestMAC + buf = make([]byte, len(requestMAC)) // long enough + n, _ := packMacWire(m, buf) + buf = buf[:n] + } + + tsigvar := make([]byte, DefaultMsgSize) + if timersOnly { + tsig := new(timerWireFmt) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + n, _ := packTimerWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } else { + tsig := new(tsigWireFmt) + tsig.Name = strings.ToLower(rr.Hdr.Name) + tsig.Class = ClassANY + tsig.Ttl = rr.Hdr.Ttl + tsig.Algorithm = strings.ToLower(rr.Algorithm) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + tsig.Error = rr.Error + tsig.OtherLen = rr.OtherLen + tsig.OtherData = rr.OtherData + n, _ := packTsigWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } + + if requestMAC != "" { + x := append(buf, msgbuf...) + buf = append(x, tsigvar...) + } else { + buf = append(msgbuf, tsigvar...) + } + return buf +} + +// Strip the TSIG from the raw message. +func stripTsig(msg []byte) ([]byte, *TSIG, error) { + // Copied from msg.go's Unpack() Header, but modified. + var ( + dh Header + err error + ) + off, tsigoff := 0, 0 + + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return nil, nil, err + } + if dh.Arcount == 0 { + return nil, nil, ErrNoSig + } + + // Rcode, see msg.go Unpack() + if int(dh.Bits&0xF) == RcodeNotAuth { + return nil, nil, ErrAuth + } + + for i := 0; i < int(dh.Qdcount); i++ { + _, off, err = unpackQuestion(msg, off) + if err != nil { + return nil, nil, err + } + } + + _, off, err = unpackRRslice(int(dh.Ancount), msg, off) + if err != nil { + return nil, nil, err + } + _, off, err = unpackRRslice(int(dh.Nscount), msg, off) + if err != nil { + return nil, nil, err + } + + rr := new(TSIG) + var extra RR + for i := 0; i < int(dh.Arcount); i++ { + tsigoff = off + extra, off, err = UnpackRR(msg, off) + if err != nil { + return nil, nil, err + } + if extra.Header().Rrtype == TypeTSIG { + rr = extra.(*TSIG) + // Adjust Arcount. + arcount := binary.BigEndian.Uint16(msg[10:]) + binary.BigEndian.PutUint16(msg[10:], arcount-1) + break + } + } + if rr == nil { + return nil, nil, ErrNoSig + } + return msg[:tsigoff], rr, nil +} + +// Translate the TSIG time signed into a date. There is no +// need for RFC1982 calculations as this date is 48 bits. 
+func tsigTimeToString(t uint64) string { + ti := time.Unix(int64(t), 0).UTC() + return ti.Format("20060102150405") +} + +func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go TSIG packing + // RR_Header + off, err := PackDomainName(tw.Name, msg, 0, nil, false) + if err != nil { + return off, err + } + off, err = packUint16(tw.Class, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(tw.Ttl, msg, off) + if err != nil { + return off, err + } + + off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) + if err != nil { + return off, err + } + off, err = packUint48(tw.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + + off, err = packUint16(tw.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(tw.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packMacWire(mw *macWireFmt, msg []byte) (int, error) { + off, err := packUint16(mw.MACSize, msg, 0) + if err != nil { + return off, err + } + off, err = packStringHex(mw.MAC, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { + off, err := packUint48(tw.TimeSigned, msg, 0) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go new file mode 100644 index 000000000000..e7370647900a --- /dev/null +++ b/vendor/github.com/miekg/dns/types.go @@ -0,0 +1,1250 @@ +package dns + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" +) + +type ( + // Type is a DNS type. + Type uint16 + // Class is a DNS class. + Class uint16 + // Name is a DNS domain name. + Name string +) + +// Packet formats + +// Wire constants and supported types. 
+const ( + // valid RR_Header.Rrtype and Question.qtype + + TypeNone uint16 = 0 + TypeA uint16 = 1 + TypeNS uint16 = 2 + TypeMD uint16 = 3 + TypeMF uint16 = 4 + TypeCNAME uint16 = 5 + TypeSOA uint16 = 6 + TypeMB uint16 = 7 + TypeMG uint16 = 8 + TypeMR uint16 = 9 + TypeNULL uint16 = 10 + TypePTR uint16 = 12 + TypeHINFO uint16 = 13 + TypeMINFO uint16 = 14 + TypeMX uint16 = 15 + TypeTXT uint16 = 16 + TypeRP uint16 = 17 + TypeAFSDB uint16 = 18 + TypeX25 uint16 = 19 + TypeISDN uint16 = 20 + TypeRT uint16 = 21 + TypeNSAPPTR uint16 = 23 + TypeSIG uint16 = 24 + TypeKEY uint16 = 25 + TypePX uint16 = 26 + TypeGPOS uint16 = 27 + TypeAAAA uint16 = 28 + TypeLOC uint16 = 29 + TypeNXT uint16 = 30 + TypeEID uint16 = 31 + TypeNIMLOC uint16 = 32 + TypeSRV uint16 = 33 + TypeATMA uint16 = 34 + TypeNAPTR uint16 = 35 + TypeKX uint16 = 36 + TypeCERT uint16 = 37 + TypeDNAME uint16 = 39 + TypeOPT uint16 = 41 // EDNS + TypeDS uint16 = 43 + TypeSSHFP uint16 = 44 + TypeRRSIG uint16 = 46 + TypeNSEC uint16 = 47 + TypeDNSKEY uint16 = 48 + TypeDHCID uint16 = 49 + TypeNSEC3 uint16 = 50 + TypeNSEC3PARAM uint16 = 51 + TypeTLSA uint16 = 52 + TypeHIP uint16 = 55 + TypeNINFO uint16 = 56 + TypeRKEY uint16 = 57 + TypeTALINK uint16 = 58 + TypeCDS uint16 = 59 + TypeCDNSKEY uint16 = 60 + TypeOPENPGPKEY uint16 = 61 + TypeSPF uint16 = 99 + TypeUINFO uint16 = 100 + TypeUID uint16 = 101 + TypeGID uint16 = 102 + TypeUNSPEC uint16 = 103 + TypeNID uint16 = 104 + TypeL32 uint16 = 105 + TypeL64 uint16 = 106 + TypeLP uint16 = 107 + TypeEUI48 uint16 = 108 + TypeEUI64 uint16 = 109 + TypeURI uint16 = 256 + TypeCAA uint16 = 257 + + TypeTKEY uint16 = 249 + TypeTSIG uint16 = 250 + + // valid Question.Qtype only + TypeIXFR uint16 = 251 + TypeAXFR uint16 = 252 + TypeMAILB uint16 = 253 + TypeMAILA uint16 = 254 + TypeANY uint16 = 255 + + TypeTA uint16 = 32768 + TypeDLV uint16 = 32769 + TypeReserved uint16 = 65535 + + // valid Question.Qclass + ClassINET = 1 + ClassCSNET = 2 + ClassCHAOS = 3 + ClassHESIOD = 4 + ClassNONE = 254 + ClassANY = 255 + + // Message Response Codes. + RcodeSuccess = 0 + RcodeFormatError = 1 + RcodeServerFailure = 2 + RcodeNameError = 3 + RcodeNotImplemented = 4 + RcodeRefused = 5 + RcodeYXDomain = 6 + RcodeYXRrset = 7 + RcodeNXRrset = 8 + RcodeNotAuth = 9 + RcodeNotZone = 10 + RcodeBadSig = 16 // TSIG + RcodeBadVers = 16 // EDNS0 + RcodeBadKey = 17 + RcodeBadTime = 18 + RcodeBadMode = 19 // TKEY + RcodeBadName = 20 + RcodeBadAlg = 21 + RcodeBadTrunc = 22 // TSIG + RcodeBadCookie = 23 // DNS Cookies + + // Message Opcodes. There is no 3. + OpcodeQuery = 0 + OpcodeIQuery = 1 + OpcodeStatus = 2 + OpcodeNotify = 4 + OpcodeUpdate = 5 +) + +// Headers is the wire format for the DNS packet header. +type Header struct { + Id uint16 + Bits uint16 + Qdcount, Ancount, Nscount, Arcount uint16 +} + +const ( + headerSize = 12 + + // Header.Bits + _QR = 1 << 15 // query/response (response=1) + _AA = 1 << 10 // authoritative + _TC = 1 << 9 // truncated + _RD = 1 << 8 // recursion desired + _RA = 1 << 7 // recursion available + _Z = 1 << 6 // Z + _AD = 1 << 5 // authticated data + _CD = 1 << 4 // checking disabled + + LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. + LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. 
+ + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + + LOC_ALTITUDEBASE = 100000 +) + +// Different Certificate Types, see RFC 4398, Section 2.1 +const ( + CertPKIX = 1 + iota + CertSPKI + CertPGP + CertIPIX + CertISPKI + CertIPGP + CertACPKIX + CertIACPKIX + CertURI = 253 + CertOID = 254 +) + +// CertTypeToString converts the Cert Type to its string representation. +// See RFC 4398 and RFC 6944. +var CertTypeToString = map[uint16]string{ + CertPKIX: "PKIX", + CertSPKI: "SPKI", + CertPGP: "PGP", + CertIPIX: "IPIX", + CertISPKI: "ISPKI", + CertIPGP: "IPGP", + CertACPKIX: "ACPKIX", + CertIACPKIX: "IACPKIX", + CertURI: "URI", + CertOID: "OID", +} + +// StringToCertType is the reverseof CertTypeToString. +var StringToCertType = reverseInt16(CertTypeToString) + +//go:generate go run types_generate.go + +// Question holds a DNS question. There can be multiple questions in the +// question section of a message. Usually there is just one. +type Question struct { + Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) + Qtype uint16 + Qclass uint16 +} + +func (q *Question) len() int { + return len(q.Name) + 1 + 2 + 2 +} + +func (q *Question) String() (s string) { + // prefix with ; (as in dig) + s = ";" + sprintName(q.Name) + "\t" + s += Class(q.Qclass).String() + "\t" + s += " " + Type(q.Qtype).String() + return s +} + +// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY +// is named "*" there. +type ANY struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *ANY) String() string { return rr.Hdr.String() } + +type CNAME struct { + Hdr RR_Header + Target string `dns:"cdomain-name"` +} + +func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } + +type HINFO struct { + Hdr RR_Header + Cpu string + Os string +} + +func (rr *HINFO) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) +} + +type MB struct { + Hdr RR_Header + Mb string `dns:"cdomain-name"` +} + +func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } + +type MG struct { + Hdr RR_Header + Mg string `dns:"cdomain-name"` +} + +func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } + +type MINFO struct { + Hdr RR_Header + Rmail string `dns:"cdomain-name"` + Email string `dns:"cdomain-name"` +} + +func (rr *MINFO) String() string { + return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) +} + +type MR struct { + Hdr RR_Header + Mr string `dns:"cdomain-name"` +} + +func (rr *MR) String() string { + return rr.Hdr.String() + sprintName(rr.Mr) +} + +type MF struct { + Hdr RR_Header + Mf string `dns:"cdomain-name"` +} + +func (rr *MF) String() string { + return rr.Hdr.String() + sprintName(rr.Mf) +} + +type MD struct { + Hdr RR_Header + Md string `dns:"cdomain-name"` +} + +func (rr *MD) String() string { + return rr.Hdr.String() + sprintName(rr.Md) +} + +type MX struct { + Hdr RR_Header + Preference uint16 + Mx string `dns:"cdomain-name"` +} + +func (rr *MX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) +} + +type AFSDB struct { + Hdr RR_Header + Subtype uint16 + Hostname string `dns:"cdomain-name"` +} + +func (rr *AFSDB) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) +} + +type X25 struct { + Hdr RR_Header + PSDNAddress string +} + +func (rr *X25) String() string { + return rr.Hdr.String() + rr.PSDNAddress +} + +type RT struct { + Hdr RR_Header + 
Preference uint16 + Host string `dns:"cdomain-name"` +} + +func (rr *RT) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) +} + +type NS struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` +} + +func (rr *NS) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) +} + +type PTR struct { + Hdr RR_Header + Ptr string `dns:"cdomain-name"` +} + +func (rr *PTR) String() string { + return rr.Hdr.String() + sprintName(rr.Ptr) +} + +type RP struct { + Hdr RR_Header + Mbox string `dns:"domain-name"` + Txt string `dns:"domain-name"` +} + +func (rr *RP) String() string { + return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) +} + +type SOA struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` + Mbox string `dns:"cdomain-name"` + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + Minttl uint32 +} + +func (rr *SOA) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + + " " + strconv.FormatInt(int64(rr.Serial), 10) + + " " + strconv.FormatInt(int64(rr.Refresh), 10) + + " " + strconv.FormatInt(int64(rr.Retry), 10) + + " " + strconv.FormatInt(int64(rr.Expire), 10) + + " " + strconv.FormatInt(int64(rr.Minttl), 10) +} + +type TXT struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +func sprintName(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + dst = appendDomainNameByte(dst, b) + } + i += n + } + } + return string(dst) +} + +func sprintTxtOctet(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + dst = append(dst, '"') + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + if b < ' ' || b > '~' { + dst = appendByte(dst, b) + } else { + dst = append(dst, b) + } + } + i += n + } + } + dst = append(dst, '"') + return string(dst) +} + +func sprintTxt(txt []string) string { + var out []byte + for i, s := range txt { + if i > 0 { + out = append(out, ` "`...) 
+ } else { + out = append(out, '"') + } + bs := []byte(s) + for j := 0; j < len(bs); { + b, n := nextByte(bs, j) + if n == 0 { + break + } + out = appendTXTStringByte(out, b) + j += n + } + out = append(out, '"') + } + return string(out) +} + +func appendDomainNameByte(s []byte, b byte) []byte { + switch b { + case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape + return append(s, '\\', b) + } + return appendTXTStringByte(s, b) +} + +func appendTXTStringByte(s []byte, b byte) []byte { + switch b { + case '\t': + return append(s, '\\', 't') + case '\r': + return append(s, '\\', 'r') + case '\n': + return append(s, '\\', 'n') + case '"', '\\': + return append(s, '\\', b) + } + if b < ' ' || b > '~' { + return appendByte(s, b) + } + return append(s, b) +} + +func appendByte(s []byte, b byte) []byte { + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + return s +} + +func nextByte(b []byte, offset int) (byte, int) { + if offset >= len(b) { + return 0, 0 + } + if b[offset] != '\\' { + // not an escape sequence + return b[offset], 1 + } + switch len(b) - offset { + case 1: // dangling escape + return 0, 0 + case 2, 3: // too short to be \ddd + default: // maybe \ddd + if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { + return dddToByte(b[offset+1:]), 4 + } + } + // not \ddd, maybe a control char + switch b[offset+1] { + case 't': + return '\t', 2 + case 'r': + return '\r', 2 + case 'n': + return '\n', 2 + default: + return b[offset+1], 2 + } +} + +type SPF struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +type SRV struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Port uint16 + Target string `dns:"domain-name"` +} + +func (rr *SRV) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Priority)) + " " + + strconv.Itoa(int(rr.Weight)) + " " + + strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) +} + +type NAPTR struct { + Hdr RR_Header + Order uint16 + Preference uint16 + Flags string + Service string + Regexp string + Replacement string `dns:"domain-name"` +} + +func (rr *NAPTR) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Order)) + " " + + strconv.Itoa(int(rr.Preference)) + " " + + "\"" + rr.Flags + "\" " + + "\"" + rr.Service + "\" " + + "\"" + rr.Regexp + "\" " + + rr.Replacement +} + +// The CERT resource record, see RFC 4398. +type CERT struct { + Hdr RR_Header + Type uint16 + KeyTag uint16 + Algorithm uint8 + Certificate string `dns:"base64"` +} + +func (rr *CERT) String() string { + var ( + ok bool + certtype, algorithm string + ) + if certtype, ok = CertTypeToString[rr.Type]; !ok { + certtype = strconv.Itoa(int(rr.Type)) + } + if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { + algorithm = strconv.Itoa(int(rr.Algorithm)) + } + return rr.Hdr.String() + certtype + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + algorithm + + " " + rr.Certificate +} + +// The DNAME resource record, see RFC 2672. 
+type DNAME struct { + Hdr RR_Header + Target string `dns:"domain-name"` +} + +func (rr *DNAME) String() string { + return rr.Hdr.String() + sprintName(rr.Target) +} + +type A struct { + Hdr RR_Header + A net.IP `dns:"a"` +} + +func (rr *A) String() string { + if rr.A == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.A.String() +} + +type AAAA struct { + Hdr RR_Header + AAAA net.IP `dns:"aaaa"` +} + +func (rr *AAAA) String() string { + if rr.AAAA == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.AAAA.String() +} + +type PX struct { + Hdr RR_Header + Preference uint16 + Map822 string `dns:"domain-name"` + Mapx400 string `dns:"domain-name"` +} + +func (rr *PX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) +} + +type GPOS struct { + Hdr RR_Header + Longitude string + Latitude string + Altitude string +} + +func (rr *GPOS) String() string { + return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude +} + +type LOC struct { + Hdr RR_Header + Version uint8 + Size uint8 + HorizPre uint8 + VertPre uint8 + Latitude uint32 + Longitude uint32 + Altitude uint32 +} + +// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent +// format and returns a string in m (two decimals for the cm) +func cmToM(m, e uint8) string { + if e < 2 { + if e == 1 { + m *= 10 + } + + return fmt.Sprintf("0.%02d", m) + } + + s := fmt.Sprintf("%d", m) + for e > 2 { + s += "0" + e-- + } + return s +} + +func (rr *LOC) String() string { + s := rr.Hdr.String() + + lat := rr.Latitude + ns := "N" + if lat > LOC_EQUATOR { + lat = lat - LOC_EQUATOR + } else { + ns = "S" + lat = LOC_EQUATOR - lat + } + h := lat / LOC_DEGREES + lat = lat % LOC_DEGREES + m := lat / LOC_HOURS + lat = lat % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) + + lon := rr.Longitude + ew := "E" + if lon > LOC_PRIMEMERIDIAN { + lon = lon - LOC_PRIMEMERIDIAN + } else { + ew = "W" + lon = LOC_PRIMEMERIDIAN - lon + } + h = lon / LOC_DEGREES + lon = lon % LOC_DEGREES + m = lon / LOC_HOURS + lon = lon % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) + + var alt = float64(rr.Altitude) / 100 + alt -= LOC_ALTITUDEBASE + if rr.Altitude%100 != 0 { + s += fmt.Sprintf("%.2fm ", alt) + } else { + s += fmt.Sprintf("%.0fm ", alt) + } + + s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " + s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " + s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" + + return s +} + +// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931. 
+type SIG struct { + RRSIG +} + +type RRSIG struct { + Hdr RR_Header + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + Signature string `dns:"base64"` +} + +func (rr *RRSIG) String() string { + s := rr.Hdr.String() + s += Type(rr.TypeCovered).String() + s += " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Labels)) + + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + + " " + TimeToString(rr.Expiration) + + " " + TimeToString(rr.Inception) + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + sprintName(rr.SignerName) + + " " + rr.Signature + return s +} + +type NSEC struct { + Hdr RR_Header + NextDomain string `dns:"domain-name"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC) String() string { + s := rr.Hdr.String() + sprintName(rr.NextDomain) + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC) len() int { + l := rr.Hdr.len() + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +type DLV struct { + DS +} + +type CDS struct { + DS +} + +type DS struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *DS) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +type KX struct { + Hdr RR_Header + Preference uint16 + Exchanger string `dns:"domain-name"` +} + +func (rr *KX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + sprintName(rr.Exchanger) +} + +type TA struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *TA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +type TALINK struct { + Hdr RR_Header + PreviousName string `dns:"domain-name"` + NextName string `dns:"domain-name"` +} + +func (rr *TALINK) String() string { + return rr.Hdr.String() + + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) +} + +type SSHFP struct { + Hdr RR_Header + Algorithm uint8 + Type uint8 + FingerPrint string `dns:"hex"` +} + +func (rr *SSHFP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Type)) + + " " + strings.ToUpper(rr.FingerPrint) +} + +type KEY struct { + DNSKEY +} + +type CDNSKEY struct { + DNSKEY +} + +type DNSKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *DNSKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +type RKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *RKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +type NSAPPTR struct { + Hdr RR_Header + Ptr string 
`dns:"domain-name"` +} + +func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } + +type NSEC3 struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` + HashLength uint8 + NextDomain string `dns:"size-base32:HashLength"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC3) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + + " " + rr.NextDomain + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC3) len() int { + l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +type NSEC3PARAM struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"hex"` +} + +func (rr *NSEC3PARAM) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + return s +} + +type TKEY struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + Inception uint32 + Expiration uint32 + Mode uint16 + Error uint16 + KeySize uint16 + Key string + OtherLen uint16 + OtherData string +} + +func (rr *TKEY) String() string { + // It has no presentation format + return "" +} + +// RFC3597 represents an unknown/generic RR. +type RFC3597 struct { + Hdr RR_Header + Rdata string `dns:"hex"` +} + +func (rr *RFC3597) String() string { + // Let's call it a hack + s := rfc3597Header(rr.Hdr) + + s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata + return s +} + +func rfc3597Header(h RR_Header) string { + var s string + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" + s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" + return s +} + +type URI struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Target string `dns:"octet"` +} + +func (rr *URI) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + + " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) +} + +type DHCID struct { + Hdr RR_Header + Digest string `dns:"base64"` +} + +func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } + +type TLSA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *TLSA) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + " " + rr.Certificate +} + +type HIP struct { + Hdr RR_Header + HitLength uint8 + PublicKeyAlgorithm uint8 + PublicKeyLength uint16 + Hit string `dns:"size-hex:HitLength"` + PublicKey string `dns:"size-base64:PublicKeyLength"` + RendezvousServers []string `dns:"domain-name"` +} + +func (rr *HIP) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + + " " + rr.Hit + + " " + rr.PublicKey + for _, d := range rr.RendezvousServers { + s += " " + sprintName(d) + } + return s +} + +type NINFO struct { + Hdr RR_Header + ZSData []string `dns:"txt"` +} + +func (rr *NINFO) 
String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } + +type NID struct { + Hdr RR_Header + Preference uint16 + NodeID uint64 +} + +func (rr *NID) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16x", rr.NodeID) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +type L32 struct { + Hdr RR_Header + Preference uint16 + Locator32 net.IP `dns:"a"` +} + +func (rr *L32) String() string { + if rr.Locator32 == nil { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + } + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + rr.Locator32.String() +} + +type L64 struct { + Hdr RR_Header + Preference uint16 + Locator64 uint64 +} + +func (rr *L64) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16X", rr.Locator64) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +type LP struct { + Hdr RR_Header + Preference uint16 + Fqdn string `dns:"domain-name"` +} + +func (rr *LP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) +} + +type EUI48 struct { + Hdr RR_Header + Address uint64 `dns:"uint48"` +} + +func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } + +type EUI64 struct { + Hdr RR_Header + Address uint64 +} + +func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } + +type CAA struct { + Hdr RR_Header + Flag uint8 + Tag string + Value string `dns:"octet"` +} + +func (rr *CAA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) +} + +type UID struct { + Hdr RR_Header + Uid uint32 +} + +func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } + +type GID struct { + Hdr RR_Header + Gid uint32 +} + +func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } + +type UINFO struct { + Hdr RR_Header + Uinfo string +} + +func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } + +type EID struct { + Hdr RR_Header + Endpoint string `dns:"hex"` +} + +func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } + +type NIMLOC struct { + Hdr RR_Header + Locator string `dns:"hex"` +} + +func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } + +type OPENPGPKEY struct { + Hdr RR_Header + PublicKey string `dns:"base64"` +} + +func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } + +// TimeToString translates the RRSIG's incep. and expir. times to the +// string representation used when printing the record. +// It takes serial arithmetic (RFC 1982) into account. +func TimeToString(t uint32) string { + mod := ((int64(t) - time.Now().Unix()) / year68) - 1 + if mod < 0 { + mod = 0 + } + ti := time.Unix(int64(t)-(mod*year68), 0).UTC() + return ti.Format("20060102150405") +} + +// StringToTime translates the RRSIG's incep. and expir. times from +// string values like "20110403154150" to an 32 bit integer. +// It takes serial arithmetic (RFC 1982) into account. 
+func StringToTime(s string) (uint32, error) { + t, err := time.Parse("20060102150405", s) + if err != nil { + return 0, err + } + mod := (t.Unix() / year68) - 1 + if mod < 0 { + mod = 0 + } + return uint32(t.Unix() - (mod * year68)), nil +} + +// saltToString converts a NSECX salt to uppercase and +// returns "-" when it is empty +func saltToString(s string) string { + if len(s) == 0 { + return "-" + } + return strings.ToUpper(s) +} + +func euiToString(eui uint64, bits int) (hex string) { + switch bits { + case 64: + hex = fmt.Sprintf("%16.16x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] + case 48: + hex = fmt.Sprintf("%12.12x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + } + return +} + +// copyIP returns a copy of ip. +func copyIP(ip net.IP) net.IP { + p := make(net.IP, len(ip)) + copy(p, ip) + return p +} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go new file mode 100644 index 000000000000..bf80da329c31 --- /dev/null +++ b/vendor/github.com/miekg/dns/types_generate.go @@ -0,0 +1,271 @@ +//+build ignore + +// types_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate conversion tables (TypeToRR and TypeToString) and banal +// methods (len, Header, copy) based on the struct tags. The generated source is +// written to ztypes.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" + "text/template" +) + +var skipLen = map[string]struct{}{ + "NSEC": {}, + "NSEC3": {}, + "OPT": {}, +} + +var packageHdr = ` +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go + +package dns + +import ( + "encoding/base64" + "net" +) + +` + +var TypeToRR = template.Must(template.New("TypeToRR").Parse(` +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ +{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, +{{end}}{{end}} } + +`)) + +var typeToString = template.Must(template.New("typeToString").Parse(` +// TypeToString is a map of strings for each RR type. +var TypeToString = map[uint16]string{ +{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}", +{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", +} + +`)) + +var headerFunc = template.Must(template.New("headerFunc").Parse(` +// Header() functions +{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } +{{end}} + +`)) + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. 
+func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect constants like TypeX + var numberedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + b, ok := o.Type().(*types.Basic) + if !ok || b.Kind() != types.Uint16 { + continue + } + if !strings.HasPrefix(o.Name(), "Type") { + continue + } + name := strings.TrimPrefix(o.Name(), "Type") + if name == "PrivateRR" { + continue + } + numberedTypes = append(numberedTypes, name) + } + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + // Generate TypeToRR + fatalIfErr(TypeToRR.Execute(b, namedTypes)) + + // Generate typeToString + fatalIfErr(typeToString.Execute(b, numberedTypes)) + + // Generate headerFunc + fatalIfErr(headerFunc.Execute(b, namedTypes)) + + // Generate len() + fmt.Fprint(b, "// len() functions\n") + for _, name := range namedTypes { + if _, ok := skipLen[name]; ok { + continue + } + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) + fmt.Fprintf(b, "l := rr.Hdr.len()\n") + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: + // ignored + case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: + o("for _, x := range rr.%s { l += len(x) + 1 }\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: + // ignored + case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: + o("l += len(rr.%s) + 1\n") + case st.Tag(i) == `dns:"octet"`: + o("l += len(rr.%s)\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("l += len(rr.%s)/2 + 1\n") + case st.Tag(i) == `dns:"a"`: + o("l += net.IPv4len // %s\n") + case st.Tag(i) == `dns:"aaaa"`: + o("l += net.IPv6len // %s\n") + case st.Tag(i) == `dns:"txt"`: + o("for _, t := range rr.%s { l += len(t) + 1 }\n") + case st.Tag(i) == `dns:"uint48"`: + o("l += 6 // %s\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("l += 1 // %s\n") + case types.Uint16: + o("l += 2 // %s\n") + case types.Uint32: + o("l += 4 // %s\n") + case 
types.Uint64: + o("l += 8 // %s\n") + case types.String: + o("l += len(rr.%s) + 1\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + fmt.Fprintf(b, "return l }\n") + } + + // Generate copy() + fmt.Fprint(b, "// copy() functions\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) + fields := []string{"*rr.Hdr.copyHeader()"} + for i := 1; i < st.NumFields(); i++ { + f := st.Field(i).Name() + if sl, ok := st.Field(i).Type().(*types.Slice); ok { + t := sl.Underlying().String() + t = strings.TrimPrefix(t, "[]") + if strings.Contains(t, ".") { + splits := strings.Split(t, ".") + t = splits[len(splits)-1] + } + fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", + f, t, f, f, f) + fields = append(fields, f) + continue + } + if st.Field(i).Type().String() == "net.IP" { + fields = append(fields, "copyIP(rr."+f+")") + continue + } + fields = append(fields, "rr."+f) + } + fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) + fmt.Fprintf(b, "}\n") + } + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("ztypes.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go new file mode 100644 index 000000000000..c79c6c88371b --- /dev/null +++ b/vendor/github.com/miekg/dns/udp.go @@ -0,0 +1,58 @@ +// +build !windows,!plan9 + +package dns + +import ( + "net" + "syscall" +) + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// setUDPSocketOptions sets the UDP socket options. +// This function is implemented on a per platform basis. See udp_*.go for more details +func setUDPSocketOptions(conn *net.UDPConn) error { + sa, err := getUDPSocketName(conn) + if err != nil { + return err + } + switch sa.(type) { + case *syscall.SockaddrInet6: + v6only, err := getUDPSocketOptions6Only(conn) + if err != nil { + return err + } + setUDPSocketOptions6(conn) + if !v6only { + setUDPSocketOptions4(conn) + } + case *syscall.SockaddrInet4: + setUDPSocketOptions4(conn) + } + return nil +} + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, 40) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. 
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr) + return n, err +} diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go new file mode 100644 index 000000000000..c62d21881b63 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_linux.go @@ -0,0 +1,73 @@ +// +build linux + +package dns + +// See: +// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and +// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/ +// +// Why do we need this: When listening on 0.0.0.0 with UDP so kernel decides what is the outgoing +// interface, this might not always be the correct one. This code will make sure the egress +// packet's interface matched the ingress' one. + +import ( + "net" + "syscall" +) + +// setUDPSocketOptions4 prepares the v4 socket for sessions. +func setUDPSocketOptions4(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil { + return err + } + // Calling File() above results in the connection becoming blocking, we must fix that. + // See https://github.com/miekg/dns/issues/279 + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + return nil +} + +// setUDPSocketOptions6 prepares the v6 socket for sessions. +func setUDPSocketOptions6(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil { + return err + } + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + return nil +} + +// getUDPSocketOption6Only return true if the socket is v6 only and false when it is v4/v6 combined +// (dualstack). +func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { + file, err := conn.File() + if err != nil { + return false, err + } + // dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections + v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY) + if err != nil { + return false, err + } + return v6only == 1, nil +} + +func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { + file, err := conn.File() + if err != nil { + return nil, err + } + return syscall.Getsockname(int(file.Fd())) +} diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go new file mode 100644 index 000000000000..d40732441b0f --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_other.go @@ -0,0 +1,17 @@ +// +build !linux,!plan9 + +package dns + +import ( + "net" + "syscall" +) + +// These do nothing. See udp_linux.go for an example of how to implement this. + +// We tried to adhire to some kind of naming scheme. 
+ +func setUDPSocketOptions4(conn *net.UDPConn) error { return nil } +func setUDPSocketOptions6(conn *net.UDPConn) error { return nil } +func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil } +func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil } diff --git a/vendor/github.com/miekg/dns/udp_plan9.go b/vendor/github.com/miekg/dns/udp_plan9.go new file mode 100644 index 000000000000..b794deeba0ff --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_plan9.go @@ -0,0 +1,34 @@ +package dns + +import ( + "net" +) + +func setUDPSocketOptions(conn *net.UDPConn) error { return nil } + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, 40) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr) + return n, err +} diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go new file mode 100644 index 000000000000..2ce4b3300287 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -0,0 +1,34 @@ +// +build windows + +package dns + +import "net" + +type SessionUDP struct { + raddr *net.UDPAddr +} + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + n, raddr, err := conn.ReadFrom(b) + if err != nil { + return n, nil, err + } + session := &SessionUDP{raddr.(*net.UDPAddr)} + return n, session, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, err := conn.WriteTo(b, session.raddr) + return n, err +} + +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// setUDPSocketOptions sets the UDP socket options. +// This function is implemented on a per platform basis. See udp_*.go for more details +func setUDPSocketOptions(conn *net.UDPConn) error { + return nil +} diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go new file mode 100644 index 000000000000..e90c5c968ec0 --- /dev/null +++ b/vendor/github.com/miekg/dns/update.go @@ -0,0 +1,106 @@ +package dns + +// NameUsed sets the RRs in the prereq section to +// "Name is in use" RRs. RFC 2136 section 2.4.4. +func (u *Msg) NameUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// NameNotUsed sets the RRs in the prereq section to +// "Name is in not use" RRs. RFC 2136 section 2.4.5. 
+func (u *Msg) NameNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) + } +} + +// Used sets the RRs in the prereq section to +// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2. +func (u *Msg) Used(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Answer = append(u.Answer, r) + } +} + +// RRsetUsed sets the RRs in the prereq section to +// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. +func (u *Msg) RRsetUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RRsetNotUsed sets the RRs in the prereq section to +// "RRset does not exist" RRs. RFC 2136 section 2.4.3. +func (u *Msg) RRsetNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}}) + } +} + +// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. +func (u *Msg) Insert(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Ns = append(u.Ns, r) + } +} + +// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. +func (u *Msg) RemoveRRset(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 +func (u *Msg) RemoveName(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4 +func (u *Msg) Remove(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = ClassNONE + r.Header().Ttl = 0 + u.Ns = append(u.Ns, r) + } +} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go new file mode 100644 index 000000000000..7346deffbb73 --- /dev/null +++ b/vendor/github.com/miekg/dns/xfr.go @@ -0,0 +1,244 @@ +package dns + +import ( + "time" +) + +// Envelope is used when doing a zone transfer with a remote server. +type Envelope struct { + RR []RR // The set of RRs in the answer section of the xfr reply message. + Error error // If something went wrong, this contains the error. +} + +// A Transfer defines parameters that are used during a zone transfer. 
+type Transfer struct { + *Conn + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds + TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be fully qualified + tsigTimersOnly bool +} + +// Think we need to away to stop the transfer + +// In performs an incoming transfer with the server in a. +// If you would like to set the source IP, or some other attribute +// of a Dialer for a Transfer, you can do so by specifying the attributes +// in the Transfer.Conn: +// +// d := net.Dialer{LocalAddr: transfer_source} +// con, err := d.Dial("tcp", master) +// dnscon := &dns.Conn{Conn:con} +// transfer = &dns.Transfer{Conn: dnscon} +// channel, err := transfer.In(message, master) +// +func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { + timeout := dnsTimeout + if t.DialTimeout != 0 { + timeout = t.DialTimeout + } + if t.Conn == nil { + t.Conn, err = DialTimeout("tcp", a, timeout) + if err != nil { + return nil, err + } + } + if err := t.WriteMsg(q); err != nil { + return nil, err + } + env = make(chan *Envelope) + go func() { + if q.Question[0].Qtype == TypeAXFR { + go t.inAxfr(q.Id, env) + return + } + if q.Question[0].Qtype == TypeIXFR { + go t.inIxfr(q.Id, env) + return + } + }() + return env, nil +} + +func (t *Transfer) inAxfr(id uint16, c chan *Envelope) { + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.Conn.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + first = !first + // only one answer that is SOA, receive more + if len(in.Answer) == 1 { + t.tsigTimersOnly = true + c <- &Envelope{in.Answer, nil} + continue + } + } + + if !first { + t.tsigTimersOnly = true // Subsequent envelopes use this. 
+ if isSOALast(in) { + c <- &Envelope{in.Answer, nil} + return + } + c <- &Envelope{in.Answer, nil} + } + } +} + +func (t *Transfer) inIxfr(id uint16, c chan *Envelope) { + serial := uint32(0) // The first serial seen is the current server serial + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + // A single SOA RR signals "no changes" + if len(in.Answer) == 1 && isSOAFirst(in) { + c <- &Envelope{in.Answer, nil} + return + } + + // Check if the returned answer is ok + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + // This serial is important + serial = in.Answer[0].(*SOA).Serial + first = !first + } + + // Now we need to check each message for SOA records, to see what we need to do + if !first { + t.tsigTimersOnly = true + // If the last record in the IXFR contains the servers' SOA, we should quit + if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok { + if v.Serial == serial { + c <- &Envelope{in.Answer, nil} + return + } + } + c <- &Envelope{in.Answer, nil} + } + } +} + +// Out performs an outgoing transfer with the client connecting in w. +// Basic use pattern: +// +// ch := make(chan *dns.Envelope) +// tr := new(dns.Transfer) +// go tr.Out(w, r, ch) +// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} +// close(ch) +// w.Hijack() +// // w.Close() // Client closes connection +// +// The server is responsible for sending the correct sequence of RRs through the +// channel ch. +func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { + for x := range ch { + r := new(Msg) + // Compress? + r.SetReply(q) + r.Authoritative = true + // assume it fits TODO(miek): fix + r.Answer = append(r.Answer, x.RR...) + if err := w.WriteMsg(r); err != nil { + return err + } + } + w.TsigTimersOnly(true) + return nil +} + +// ReadMsg reads a message from the transfer connection t. +func (t *Transfer) ReadMsg() (*Msg, error) { + m := new(Msg) + p := make([]byte, MaxMsgSize) + n, err := t.Read(p) + if err != nil && n == 0 { + return nil, err + } + p = p[:n] + if err := m.Unpack(p); err != nil { + return nil, err + } + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + t.tsigRequestMAC = ts.MAC + } + return m, err +} + +// WriteMsg writes a message through the transfer connection t. 
+func (t *Transfer) WriteMsg(m *Msg) (err error) { + var out []byte + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return ErrSecret + } + out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + if _, err = t.Write(out); err != nil { + return err + } + return nil +} + +func isSOAFirst(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[0].Header().Rrtype == TypeSOA + } + return false +} + +func isSOALast(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA + } + return false +} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go new file mode 100644 index 000000000000..e5f3cf2974c4 --- /dev/null +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -0,0 +1,3462 @@ +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go + +package dns + +// pack*() functions + +func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataA(rr.A, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataAAAA(rr.AAAA, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Subtype, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Hostname, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Flag, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Tag, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Value, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = 
packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + 
headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Endpoint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint48(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint64(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Gid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Cpu, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Os, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = 
uint16(off - headerEnd) + return off, nil +} + +func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.HitLength, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.PublicKeyLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Hit, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Exchanger, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDataA(rr.Locator32, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.Locator64, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Version, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Size, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.HorizPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.VertPre, msg, off) + if err != nil { + return off, err + } + off, err 
= packUint32(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Fqdn, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mb, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Md, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mf, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mg, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Rmail, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Email, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = 
packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mx, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Order, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Service, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Regexp, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Replacement, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.NodeID, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Locator, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.ZSData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.NextDomain, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + 
rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.HashLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase32(rr.NextDomain, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataOpt(rr.Option, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Map822, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mapx400, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil 
+} + +func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Rdata, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Txt, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Host, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = 
packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Refresh, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Retry, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expire, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Minttl, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Port, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.FingerPrint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = 
packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.PreviousName, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.NextName, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Mode, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeySize, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Key, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint48(rr.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Fudge, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.MACSize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.MAC, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OrigId, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = 
packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Uid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Uinfo, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Target, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.PSDNAddress, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// unpack*() functions + +func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(A) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.A, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AAAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.AAAA, off, err = unpackDataAAAA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AFSDB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Subtype, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hostname, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(ANY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + return rr, off, err +} + +func unpackCAA(h RR_Header, 
msg []byte, off int) (RR, int, error) { + rr := new(CAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flag, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Tag, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Value, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CERT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Type, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DHCID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func 
unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DLV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI48) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint64(msg, off) + if err != nil { + return 
rr, off, err + } + return rr, off, err +} + +func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Gid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GPOS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Longitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Cpu, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Os, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HIP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.HitLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyLength, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) + if err != nil { + return rr, off, err + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) + if err != nil { + return rr, off, err + } + rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + 
rr.Exchanger, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L32) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator32, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator64, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Version, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Size, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.HorizPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.VertPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Longitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fqdn, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mb, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MD) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Md, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mf, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, 
off, err +} + +func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mg, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rmail, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Email, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mx, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NAPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Order, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Service, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Regexp, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Replacement, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NodeID, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NIMLOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + 
_ = rdStart + + rr.ZSData, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSAPPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + rr.HashLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) + if err != nil { + return rr, off, err + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3PARAM) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPENPGPKEY) + rr.Hdr = h + if noRdata(h) { + return rr, 
off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Option, off, err = unpackDataOpt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Map822, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mapx400, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RFC3597) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Txt, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RRSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err 
!= nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Host, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SOA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Refresh, off, err = 
unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Retry, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expire, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Minttl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SPF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SRV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Port, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SSHFP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Type, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TALINK) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PreviousName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TKEY) + rr.Hdr = h + if noRdata(h) { + return 
rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mode, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeySize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Key, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TLSA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TimeSigned, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fudge, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MACSize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) + if err != nil { + return rr, off, err + } + rr.OrigId, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTXT(h 
RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TXT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uinfo, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(URI) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(X25) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PSDNAddress, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ + TypeA: unpackA, + TypeAAAA: unpackAAAA, + TypeAFSDB: unpackAFSDB, + TypeANY: unpackANY, + TypeCAA: unpackCAA, + TypeCDNSKEY: unpackCDNSKEY, + TypeCDS: unpackCDS, + TypeCERT: unpackCERT, + TypeCNAME: unpackCNAME, + TypeDHCID: unpackDHCID, + TypeDLV: unpackDLV, + TypeDNAME: unpackDNAME, + TypeDNSKEY: unpackDNSKEY, + TypeDS: unpackDS, + TypeEID: unpackEID, + TypeEUI48: unpackEUI48, + TypeEUI64: unpackEUI64, + TypeGID: unpackGID, + TypeGPOS: unpackGPOS, + TypeHINFO: unpackHINFO, + TypeHIP: unpackHIP, + TypeKEY: unpackKEY, + TypeKX: unpackKX, + TypeL32: unpackL32, + TypeL64: unpackL64, + TypeLOC: unpackLOC, + TypeLP: unpackLP, + TypeMB: unpackMB, + TypeMD: unpackMD, + TypeMF: unpackMF, + TypeMG: unpackMG, + TypeMINFO: unpackMINFO, + TypeMR: unpackMR, + TypeMX: unpackMX, + TypeNAPTR: unpackNAPTR, + TypeNID: unpackNID, + TypeNIMLOC: unpackNIMLOC, + TypeNINFO: unpackNINFO, + TypeNS: unpackNS, + TypeNSAPPTR: unpackNSAPPTR, + TypeNSEC: unpackNSEC, + TypeNSEC3: unpackNSEC3, + TypeNSEC3PARAM: unpackNSEC3PARAM, + TypeOPENPGPKEY: unpackOPENPGPKEY, + TypeOPT: unpackOPT, + TypePTR: unpackPTR, + TypePX: unpackPX, + TypeRKEY: unpackRKEY, + TypeRP: unpackRP, + TypeRRSIG: unpackRRSIG, + TypeRT: unpackRT, + TypeSIG: unpackSIG, + TypeSOA: unpackSOA, + TypeSPF: unpackSPF, + TypeSRV: unpackSRV, + TypeSSHFP: unpackSSHFP, + TypeTA: unpackTA, + TypeTALINK: unpackTALINK, + TypeTKEY: unpackTKEY, + TypeTLSA: unpackTLSA, + TypeTSIG: unpackTSIG, + TypeTXT: unpackTXT, + TypeUID: unpackUID, + TypeUINFO: unpackUINFO, + TypeURI: unpackURI, + TypeX25: unpackX25, +} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go new 
file mode 100644 index 000000000000..a4ecbb0cc0fa --- /dev/null +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -0,0 +1,828 @@ +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go + +package dns + +import ( + "encoding/base64" + "net" +) + +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ + TypeA: func() RR { return new(A) }, + TypeAAAA: func() RR { return new(AAAA) }, + TypeAFSDB: func() RR { return new(AFSDB) }, + TypeANY: func() RR { return new(ANY) }, + TypeCAA: func() RR { return new(CAA) }, + TypeCDNSKEY: func() RR { return new(CDNSKEY) }, + TypeCDS: func() RR { return new(CDS) }, + TypeCERT: func() RR { return new(CERT) }, + TypeCNAME: func() RR { return new(CNAME) }, + TypeDHCID: func() RR { return new(DHCID) }, + TypeDLV: func() RR { return new(DLV) }, + TypeDNAME: func() RR { return new(DNAME) }, + TypeDNSKEY: func() RR { return new(DNSKEY) }, + TypeDS: func() RR { return new(DS) }, + TypeEID: func() RR { return new(EID) }, + TypeEUI48: func() RR { return new(EUI48) }, + TypeEUI64: func() RR { return new(EUI64) }, + TypeGID: func() RR { return new(GID) }, + TypeGPOS: func() RR { return new(GPOS) }, + TypeHINFO: func() RR { return new(HINFO) }, + TypeHIP: func() RR { return new(HIP) }, + TypeKEY: func() RR { return new(KEY) }, + TypeKX: func() RR { return new(KX) }, + TypeL32: func() RR { return new(L32) }, + TypeL64: func() RR { return new(L64) }, + TypeLOC: func() RR { return new(LOC) }, + TypeLP: func() RR { return new(LP) }, + TypeMB: func() RR { return new(MB) }, + TypeMD: func() RR { return new(MD) }, + TypeMF: func() RR { return new(MF) }, + TypeMG: func() RR { return new(MG) }, + TypeMINFO: func() RR { return new(MINFO) }, + TypeMR: func() RR { return new(MR) }, + TypeMX: func() RR { return new(MX) }, + TypeNAPTR: func() RR { return new(NAPTR) }, + TypeNID: func() RR { return new(NID) }, + TypeNIMLOC: func() RR { return new(NIMLOC) }, + TypeNINFO: func() RR { return new(NINFO) }, + TypeNS: func() RR { return new(NS) }, + TypeNSAPPTR: func() RR { return new(NSAPPTR) }, + TypeNSEC: func() RR { return new(NSEC) }, + TypeNSEC3: func() RR { return new(NSEC3) }, + TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, + TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, + TypeOPT: func() RR { return new(OPT) }, + TypePTR: func() RR { return new(PTR) }, + TypePX: func() RR { return new(PX) }, + TypeRKEY: func() RR { return new(RKEY) }, + TypeRP: func() RR { return new(RP) }, + TypeRRSIG: func() RR { return new(RRSIG) }, + TypeRT: func() RR { return new(RT) }, + TypeSIG: func() RR { return new(SIG) }, + TypeSOA: func() RR { return new(SOA) }, + TypeSPF: func() RR { return new(SPF) }, + TypeSRV: func() RR { return new(SRV) }, + TypeSSHFP: func() RR { return new(SSHFP) }, + TypeTA: func() RR { return new(TA) }, + TypeTALINK: func() RR { return new(TALINK) }, + TypeTKEY: func() RR { return new(TKEY) }, + TypeTLSA: func() RR { return new(TLSA) }, + TypeTSIG: func() RR { return new(TSIG) }, + TypeTXT: func() RR { return new(TXT) }, + TypeUID: func() RR { return new(UID) }, + TypeUINFO: func() RR { return new(UINFO) }, + TypeURI: func() RR { return new(URI) }, + TypeX25: func() RR { return new(X25) }, +} + +// TypeToString is a map of strings for each RR type. 
+var TypeToString = map[uint16]string{ + TypeA: "A", + TypeAAAA: "AAAA", + TypeAFSDB: "AFSDB", + TypeANY: "ANY", + TypeATMA: "ATMA", + TypeAXFR: "AXFR", + TypeCAA: "CAA", + TypeCDNSKEY: "CDNSKEY", + TypeCDS: "CDS", + TypeCERT: "CERT", + TypeCNAME: "CNAME", + TypeDHCID: "DHCID", + TypeDLV: "DLV", + TypeDNAME: "DNAME", + TypeDNSKEY: "DNSKEY", + TypeDS: "DS", + TypeEID: "EID", + TypeEUI48: "EUI48", + TypeEUI64: "EUI64", + TypeGID: "GID", + TypeGPOS: "GPOS", + TypeHINFO: "HINFO", + TypeHIP: "HIP", + TypeISDN: "ISDN", + TypeIXFR: "IXFR", + TypeKEY: "KEY", + TypeKX: "KX", + TypeL32: "L32", + TypeL64: "L64", + TypeLOC: "LOC", + TypeLP: "LP", + TypeMAILA: "MAILA", + TypeMAILB: "MAILB", + TypeMB: "MB", + TypeMD: "MD", + TypeMF: "MF", + TypeMG: "MG", + TypeMINFO: "MINFO", + TypeMR: "MR", + TypeMX: "MX", + TypeNAPTR: "NAPTR", + TypeNID: "NID", + TypeNIMLOC: "NIMLOC", + TypeNINFO: "NINFO", + TypeNS: "NS", + TypeNSEC: "NSEC", + TypeNSEC3: "NSEC3", + TypeNSEC3PARAM: "NSEC3PARAM", + TypeNULL: "NULL", + TypeNXT: "NXT", + TypeNone: "None", + TypeOPENPGPKEY: "OPENPGPKEY", + TypeOPT: "OPT", + TypePTR: "PTR", + TypePX: "PX", + TypeRKEY: "RKEY", + TypeRP: "RP", + TypeRRSIG: "RRSIG", + TypeRT: "RT", + TypeReserved: "Reserved", + TypeSIG: "SIG", + TypeSOA: "SOA", + TypeSPF: "SPF", + TypeSRV: "SRV", + TypeSSHFP: "SSHFP", + TypeTA: "TA", + TypeTALINK: "TALINK", + TypeTKEY: "TKEY", + TypeTLSA: "TLSA", + TypeTSIG: "TSIG", + TypeTXT: "TXT", + TypeUID: "UID", + TypeUINFO: "UINFO", + TypeUNSPEC: "UNSPEC", + TypeURI: "URI", + TypeX25: "X25", + TypeNSAPPTR: "NSAP-PTR", +} + +// Header() functions +func (rr *A) Header() *RR_Header { return &rr.Hdr } +func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } +func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *ANY) Header() *RR_Header { return &rr.Hdr } +func (rr *CAA) Header() *RR_Header { return &rr.Hdr } +func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *CDS) Header() *RR_Header { return &rr.Hdr } +func (rr *CERT) Header() *RR_Header { return &rr.Hdr } +func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } +func (rr *DLV) Header() *RR_Header { return &rr.Hdr } +func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *DS) Header() *RR_Header { return &rr.Hdr } +func (rr *EID) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } +func (rr *GID) Header() *RR_Header { return &rr.Hdr } +func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } +func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *HIP) Header() *RR_Header { return &rr.Hdr } +func (rr *KEY) Header() *RR_Header { return &rr.Hdr } +func (rr *KX) Header() *RR_Header { return &rr.Hdr } +func (rr *L32) Header() *RR_Header { return &rr.Hdr } +func (rr *L64) Header() *RR_Header { return &rr.Hdr } +func (rr *LOC) Header() *RR_Header { return &rr.Hdr } +func (rr *LP) Header() *RR_Header { return &rr.Hdr } +func (rr *MB) Header() *RR_Header { return &rr.Hdr } +func (rr *MD) Header() *RR_Header { return &rr.Hdr } +func (rr *MF) Header() *RR_Header { return &rr.Hdr } +func (rr *MG) Header() *RR_Header { return &rr.Hdr } +func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *MR) Header() *RR_Header { return &rr.Hdr } +func (rr *MX) Header() *RR_Header { return &rr.Hdr } +func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NID) 
Header() *RR_Header { return &rr.Hdr } +func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } +func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *NS) Header() *RR_Header { return &rr.Hdr } +func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } +func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *OPT) Header() *RR_Header { return &rr.Hdr } +func (rr *PTR) Header() *RR_Header { return &rr.Hdr } +func (rr *PX) Header() *RR_Header { return &rr.Hdr } +func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } +func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *RP) Header() *RR_Header { return &rr.Hdr } +func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *RT) Header() *RR_Header { return &rr.Hdr } +func (rr *SIG) Header() *RR_Header { return &rr.Hdr } +func (rr *SOA) Header() *RR_Header { return &rr.Hdr } +func (rr *SPF) Header() *RR_Header { return &rr.Hdr } +func (rr *SRV) Header() *RR_Header { return &rr.Hdr } +func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } +func (rr *TA) Header() *RR_Header { return &rr.Hdr } +func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } +func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } +func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *TXT) Header() *RR_Header { return &rr.Hdr } +func (rr *UID) Header() *RR_Header { return &rr.Hdr } +func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *URI) Header() *RR_Header { return &rr.Hdr } +func (rr *X25) Header() *RR_Header { return &rr.Hdr } + +// len() functions +func (rr *A) len() int { + l := rr.Hdr.len() + l += net.IPv4len // A + return l +} +func (rr *AAAA) len() int { + l := rr.Hdr.len() + l += net.IPv6len // AAAA + return l +} +func (rr *AFSDB) len() int { + l := rr.Hdr.len() + l += 2 // Subtype + l += len(rr.Hostname) + 1 + return l +} +func (rr *ANY) len() int { + l := rr.Hdr.len() + return l +} +func (rr *CAA) len() int { + l := rr.Hdr.len() + l += 1 // Flag + l += len(rr.Tag) + 1 + l += len(rr.Value) + return l +} +func (rr *CERT) len() int { + l := rr.Hdr.len() + l += 2 // Type + l += 2 // KeyTag + l += 1 // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) + return l +} +func (rr *CNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DHCID) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.Digest)) + return l +} +func (rr *DNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DNSKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l += 1 // Protocol + l += 1 // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *DS) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l += 1 // Algorithm + l += 1 // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *EID) len() int { + l := rr.Hdr.len() + l += len(rr.Endpoint)/2 + 1 + return l +} +func (rr *EUI48) len() int { + l := rr.Hdr.len() + l += 6 // Address + return l +} +func (rr *EUI64) len() int { + l := rr.Hdr.len() + l += 8 // Address + return l +} +func (rr *GID) len() int { + l := rr.Hdr.len() + l += 4 // Gid + return l +} +func (rr *GPOS) len() int { + l := rr.Hdr.len() + l += len(rr.Longitude) + 1 + l += len(rr.Latitude) + 1 + l += len(rr.Altitude) 
+ 1 + return l +} +func (rr *HINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Cpu) + 1 + l += len(rr.Os) + 1 + return l +} +func (rr *HIP) len() int { + l := rr.Hdr.len() + l += 1 // HitLength + l += 1 // PublicKeyAlgorithm + l += 2 // PublicKeyLength + l += len(rr.Hit)/2 + 1 + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + for _, x := range rr.RendezvousServers { + l += len(x) + 1 + } + return l +} +func (rr *KX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Exchanger) + 1 + return l +} +func (rr *L32) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += net.IPv4len // Locator32 + return l +} +func (rr *L64) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // Locator64 + return l +} +func (rr *LOC) len() int { + l := rr.Hdr.len() + l += 1 // Version + l += 1 // Size + l += 1 // HorizPre + l += 1 // VertPre + l += 4 // Latitude + l += 4 // Longitude + l += 4 // Altitude + return l +} +func (rr *LP) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Fqdn) + 1 + return l +} +func (rr *MB) len() int { + l := rr.Hdr.len() + l += len(rr.Mb) + 1 + return l +} +func (rr *MD) len() int { + l := rr.Hdr.len() + l += len(rr.Md) + 1 + return l +} +func (rr *MF) len() int { + l := rr.Hdr.len() + l += len(rr.Mf) + 1 + return l +} +func (rr *MG) len() int { + l := rr.Hdr.len() + l += len(rr.Mg) + 1 + return l +} +func (rr *MINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Rmail) + 1 + l += len(rr.Email) + 1 + return l +} +func (rr *MR) len() int { + l := rr.Hdr.len() + l += len(rr.Mr) + 1 + return l +} +func (rr *MX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Mx) + 1 + return l +} +func (rr *NAPTR) len() int { + l := rr.Hdr.len() + l += 2 // Order + l += 2 // Preference + l += len(rr.Flags) + 1 + l += len(rr.Service) + 1 + l += len(rr.Regexp) + 1 + l += len(rr.Replacement) + 1 + return l +} +func (rr *NID) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // NodeID + return l +} +func (rr *NIMLOC) len() int { + l := rr.Hdr.len() + l += len(rr.Locator)/2 + 1 + return l +} +func (rr *NINFO) len() int { + l := rr.Hdr.len() + for _, x := range rr.ZSData { + l += len(x) + 1 + } + return l +} +func (rr *NS) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + return l +} +func (rr *NSAPPTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *NSEC3PARAM) len() int { + l := rr.Hdr.len() + l += 1 // Hash + l += 1 // Flags + l += 2 // Iterations + l += 1 // SaltLength + l += len(rr.Salt)/2 + 1 + return l +} +func (rr *OPENPGPKEY) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *PTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *PX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Map822) + 1 + l += len(rr.Mapx400) + 1 + return l +} +func (rr *RFC3597) len() int { + l := rr.Hdr.len() + l += len(rr.Rdata)/2 + 1 + return l +} +func (rr *RKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l += 1 // Protocol + l += 1 // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *RP) len() int { + l := rr.Hdr.len() + l += len(rr.Mbox) + 1 + l += len(rr.Txt) + 1 + return l +} +func (rr *RRSIG) len() int { + l := rr.Hdr.len() + l += 2 // TypeCovered + l += 1 // Algorithm + l += 1 // Labels + l += 4 // OrigTtl + l += 4 // Expiration + l += 4 // Inception + l += 2 // KeyTag + l += len(rr.SignerName) + 1 + l += 
base64.StdEncoding.DecodedLen(len(rr.Signature)) + return l +} +func (rr *RT) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Host) + 1 + return l +} +func (rr *SOA) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + l += len(rr.Mbox) + 1 + l += 4 // Serial + l += 4 // Refresh + l += 4 // Retry + l += 4 // Expire + l += 4 // Minttl + return l +} +func (rr *SPF) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *SRV) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += 2 // Port + l += len(rr.Target) + 1 + return l +} +func (rr *SSHFP) len() int { + l := rr.Hdr.len() + l += 1 // Algorithm + l += 1 // Type + l += len(rr.FingerPrint)/2 + 1 + return l +} +func (rr *TA) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l += 1 // Algorithm + l += 1 // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *TALINK) len() int { + l := rr.Hdr.len() + l += len(rr.PreviousName) + 1 + l += len(rr.NextName) + 1 + return l +} +func (rr *TKEY) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 4 // Inception + l += 4 // Expiration + l += 2 // Mode + l += 2 // Error + l += 2 // KeySize + l += len(rr.Key) + 1 + l += 2 // OtherLen + l += len(rr.OtherData) + 1 + return l +} +func (rr *TLSA) len() int { + l := rr.Hdr.len() + l += 1 // Usage + l += 1 // Selector + l += 1 // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *TSIG) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 6 // TimeSigned + l += 2 // Fudge + l += 2 // MACSize + l += len(rr.MAC)/2 + 1 + l += 2 // OrigId + l += 2 // Error + l += 2 // OtherLen + l += len(rr.OtherData)/2 + 1 + return l +} +func (rr *TXT) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *UID) len() int { + l := rr.Hdr.len() + l += 4 // Uid + return l +} +func (rr *UINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Uinfo) + 1 + return l +} +func (rr *URI) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += len(rr.Target) + return l +} +func (rr *X25) len() int { + l := rr.Hdr.len() + l += len(rr.PSDNAddress) + 1 + return l +} + +// copy() functions +func (rr *A) copy() RR { + return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} +} +func (rr *AAAA) copy() RR { + return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} +} +func (rr *AFSDB) copy() RR { + return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} +} +func (rr *ANY) copy() RR { + return &ANY{*rr.Hdr.copyHeader()} +} +func (rr *CAA) copy() RR { + return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} +} +func (rr *CERT) copy() RR { + return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} +} +func (rr *CNAME) copy() RR { + return &CNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *DHCID) copy() RR { + return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} +} +func (rr *DNAME) copy() RR { + return &DNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *DNSKEY) copy() RR { + return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *DS) copy() RR { + return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *EID) copy() RR { + return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} +} +func (rr *EUI48) copy() RR { + return &EUI48{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *EUI64) copy() RR { + return &EUI64{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *GID) copy() RR 
{ + return &GID{*rr.Hdr.copyHeader(), rr.Gid} +} +func (rr *GPOS) copy() RR { + return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} +} +func (rr *HINFO) copy() RR { + return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} +} +func (rr *HIP) copy() RR { + RendezvousServers := make([]string, len(rr.RendezvousServers)) + copy(RendezvousServers, rr.RendezvousServers) + return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} +} +func (rr *KX) copy() RR { + return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} +} +func (rr *L32) copy() RR { + return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} +} +func (rr *L64) copy() RR { + return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} +} +func (rr *LOC) copy() RR { + return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} +} +func (rr *LP) copy() RR { + return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} +} +func (rr *MB) copy() RR { + return &MB{*rr.Hdr.copyHeader(), rr.Mb} +} +func (rr *MD) copy() RR { + return &MD{*rr.Hdr.copyHeader(), rr.Md} +} +func (rr *MF) copy() RR { + return &MF{*rr.Hdr.copyHeader(), rr.Mf} +} +func (rr *MG) copy() RR { + return &MG{*rr.Hdr.copyHeader(), rr.Mg} +} +func (rr *MINFO) copy() RR { + return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} +} +func (rr *MR) copy() RR { + return &MR{*rr.Hdr.copyHeader(), rr.Mr} +} +func (rr *MX) copy() RR { + return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} +} +func (rr *NAPTR) copy() RR { + return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} +} +func (rr *NID) copy() RR { + return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} +} +func (rr *NIMLOC) copy() RR { + return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} +} +func (rr *NINFO) copy() RR { + ZSData := make([]string, len(rr.ZSData)) + copy(ZSData, rr.ZSData) + return &NINFO{*rr.Hdr.copyHeader(), ZSData} +} +func (rr *NS) copy() RR { + return &NS{*rr.Hdr.copyHeader(), rr.Ns} +} +func (rr *NSAPPTR) copy() RR { + return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} +} +func (rr *NSEC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3PARAM) copy() RR { + return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} +} +func (rr *OPENPGPKEY) copy() RR { + return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} +} +func (rr *OPT) copy() RR { + Option := make([]EDNS0, len(rr.Option)) + copy(Option, rr.Option) + return &OPT{*rr.Hdr.copyHeader(), Option} +} +func (rr *PTR) copy() RR { + return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} +} +func (rr *PX) copy() RR { + return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} +} +func (rr *RFC3597) copy() RR { + return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} +} +func (rr *RKEY) copy() RR { + return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *RP) copy() RR { + return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} +} +func (rr *RRSIG) copy() RR { + return 
&RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} +} +func (rr *RT) copy() RR { + return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} +} +func (rr *SOA) copy() RR { + return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} +} +func (rr *SPF) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &SPF{*rr.Hdr.copyHeader(), Txt} +} +func (rr *SRV) copy() RR { + return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} +} +func (rr *SSHFP) copy() RR { + return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} +} +func (rr *TA) copy() RR { + return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *TALINK) copy() RR { + return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} +} +func (rr *TKEY) copy() RR { + return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} +} +func (rr *TLSA) copy() RR { + return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *TSIG) copy() RR { + return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} +} +func (rr *TXT) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &TXT{*rr.Hdr.copyHeader(), Txt} +} +func (rr *UID) copy() RR { + return &UID{*rr.Hdr.copyHeader(), rr.Uid} +} +func (rr *UINFO) copy() RR { + return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} +} +func (rr *URI) copy() RR { + return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} +} +func (rr *X25) copy() RR { + return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} +} diff --git a/vendor/github.com/weppos/dnsimple-go/LICENSE b/vendor/github.com/weppos/dnsimple-go/LICENSE new file mode 100644 index 000000000000..5377f6daeee5 --- /dev/null +++ b/vendor/github.com/weppos/dnsimple-go/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Scott Barron + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/weppos/dnsimple-go/dnsimple/authentication.go b/vendor/github.com/weppos/dnsimple-go/dnsimple/authentication.go new file mode 100644 index 000000000000..cd3a823c4716 --- /dev/null +++ b/vendor/github.com/weppos/dnsimple-go/dnsimple/authentication.go @@ -0,0 +1,73 @@ +package dnsimple + +import ( + "encoding/base64" + "fmt" +) + +const ( + httpHeaderDomainToken = "X-DNSimple-Domain-Token" + httpHeaderApiToken = "X-DNSimple-Token" + httpHeaderAuthorization = "Authorization" +) + +// Provides credentials that can be used for authenticating with DNSimple +// +// More information on credentials may be found here: +// http://developer.dnsimple.com/v2/#authentication +type Credentials interface { + // Get the HTTP header key and value to use for authentication. + HttpHeader() (string, string) +} + +// Domain token authentication + +type domainTokenCredentials struct { + domainToken string +} + +// Construct Credentials using the DNSimple Domain Token method +func NewDomainTokenCredentials(domainToken string) Credentials { + return &domainTokenCredentials{domainToken: domainToken} +} + +func (c *domainTokenCredentials) HttpHeader() (string, string) { + return httpHeaderDomainToken, c.domainToken +} + +// HTTP basic authentication + +type httpBasicCredentials struct { + email string + password string +} + +// Construct Credentials using HTTP Basic Auth +func NewHttpBasicCredentials(email, password string) Credentials { + return &httpBasicCredentials{email, password} +} + +func (c *httpBasicCredentials) HttpHeader() (string, string) { + return httpHeaderAuthorization, "Basic " + basicAuth(c.email, c.password) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +// API token authentication + +type apiTokenCredentials struct { + email string + apiToken string +} + +// Construct Credentials using the API Token method. +func NewApiTokenCredentials(email, apiToken string) Credentials { + return &apiTokenCredentials{email: email, apiToken: apiToken} +} + +func (c *apiTokenCredentials) HttpHeader() (string, string) { + return httpHeaderApiToken, fmt.Sprintf("%v:%v", c.email, c.apiToken) +} diff --git a/vendor/github.com/weppos/dnsimple-go/dnsimple/contacts.go b/vendor/github.com/weppos/dnsimple-go/dnsimple/contacts.go new file mode 100644 index 000000000000..6a53be767f9d --- /dev/null +++ b/vendor/github.com/weppos/dnsimple-go/dnsimple/contacts.go @@ -0,0 +1,121 @@ +package dnsimple + +import ( + "fmt" + "time" +) + +// ContactsService handles communication with the contact related +// methods of the DNSimple API. 
+// +// DNSimple API docs: http://developer.dnsimple.com/contacts/ +type ContactsService struct { + client *Client +} + +type Contact struct { + Id int `json:"id,omitempty"` + Label string `json:"label,omitempty"` + FirstName string `json:"first_name,omitempty"` + LastName string `json:"last_name,omitempty"` + JobTitle string `json:"job_title,omitempty"` + Organization string `json:"organization_name,omitempty"` + Email string `json:"email_address,omitempty"` + Phone string `json:"phone,omitempty"` + Fax string `json:"fax,omitempty"` + Address1 string `json:"address1,omitempty"` + Address2 string `json:"address2,omitempty"` + City string `json:"city,omitempty"` + Zip string `json:"postal_code,omitempty"` + Country string `json:"country,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +type contactWrapper struct { + Contact Contact `json:"contact"` +} + +// contactPath generates the resource path for given contact. +func contactPath(contact interface{}) string { + if contact != nil { + return fmt.Sprintf("contacts/%d", contact) + } + return "contacts" +} + +// List the contacts. +// +// DNSimple API docs: http://developer.dnsimple.com/contacts/#list +func (s *ContactsService) List() ([]Contact, *Response, error) { + path := contactPath(nil) + wrappedContacts := []contactWrapper{} + + res, err := s.client.get(path, &wrappedContacts) + if err != nil { + return []Contact{}, res, err + } + + contacts := []Contact{} + for _, contact := range wrappedContacts { + contacts = append(contacts, contact.Contact) + } + + return contacts, res, nil +} + +// Create a new contact. +// +// DNSimple API docs: http://developer.dnsimple.com/contacts/#create +func (s *ContactsService) Create(contactAttributes Contact) (Contact, *Response, error) { + path := contactPath(nil) + wrappedContact := contactWrapper{Contact: contactAttributes} + returnedContact := contactWrapper{} + + res, err := s.client.post(path, wrappedContact, &returnedContact) + if err != nil { + return Contact{}, res, err + } + + return returnedContact.Contact, res, nil +} + +// Get fetches a contact. +// +// DNSimple API docs: http://developer.dnsimple.com/contacts/#get +func (s *ContactsService) Get(contactID int) (Contact, *Response, error) { + path := contactPath(contactID) + wrappedContact := contactWrapper{} + + res, err := s.client.get(path, &wrappedContact) + if err != nil { + return Contact{}, res, err + } + + return wrappedContact.Contact, res, nil +} + +// Update a contact. +// +// DNSimple API docs: http://developer.dnsimple.com/contacts/#update +func (s *ContactsService) Update(contactID int, contactAttributes Contact) (Contact, *Response, error) { + path := contactPath(contactID) + wrappedContact := contactWrapper{Contact: contactAttributes} + returnedContact := contactWrapper{} + + res, err := s.client.put(path, wrappedContact, &returnedContact) + if err != nil { + return Contact{}, res, err + } + + return returnedContact.Contact, res, nil +} + +// Delete a contact. 
+// +// DNSimple API docs: http://developer.dnsimple.com/contacts/#delete +func (s *ContactsService) Delete(contactID int) (*Response, error) { + path := contactPath(contactID) + + return s.client.delete(path, nil) +} diff --git a/vendor/github.com/weppos/dnsimple-go/dnsimple/dnsimple.go b/vendor/github.com/weppos/dnsimple-go/dnsimple/dnsimple.go new file mode 100644 index 000000000000..d2cfd6270a35 --- /dev/null +++ b/vendor/github.com/weppos/dnsimple-go/dnsimple/dnsimple.go @@ -0,0 +1,192 @@ +// Package dnsimple implements a client for the DNSimple API. +// +// In order to use this package you will need a DNSimple account and your API Token. +package dnsimple + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" +) + +const ( + libraryVersion = "0.1" + baseURL = "https://api.dnsimple.com/" + userAgent = "dnsimple-go/" + libraryVersion + + apiVersion = "v1" +) + +type Client struct { + // HTTP client used to communicate with the API. + HttpClient *http.Client + + // Credentials used for accessing the DNSimple API + Credentials Credentials + + // Base URL for API requests. + // Defaults to the public DNSimple API, but can be set to a different endpoint (e.g. the sandbox). + // BaseURL should always be specified with a trailing slash. + BaseURL string + + // User agent used when communicating with the DNSimple API. + UserAgent string + + // Services used for talking to different parts of the DNSimple API. + Contacts *ContactsService + Domains *DomainsService + Registrar *RegistrarService + Users *UsersService +} + +// NewClient returns a new DNSimple API client. +func NewClient(apiToken, email string) *Client { + return NewAuthenticatedClient(NewApiTokenCredentials(email, apiToken)) + +} + +// NewAuthenticatedClient returns a new DNSimple API client using the given +// credentials. +func NewAuthenticatedClient(credentials Credentials) *Client { + c := &Client{Credentials: credentials, HttpClient: &http.Client{}, BaseURL: baseURL, UserAgent: userAgent} + c.Contacts = &ContactsService{client: c} + c.Domains = &DomainsService{client: c} + c.Registrar = &RegistrarService{client: c} + c.Users = &UsersService{client: c} + return c +} + +// NewRequest creates an API request. +// The path is expected to be a relative path and will be resolved +// according to the BaseURL of the Client. Paths should always be specified without a preceding slash. 
+func (client *Client) NewRequest(method, path string, payload interface{}) (*http.Request, error) { + url := client.BaseURL + fmt.Sprintf("%s/%s", apiVersion, path) + + body := new(bytes.Buffer) + if payload != nil { + err := json.NewEncoder(body).Encode(payload) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Add("Accept", "application/json") + req.Header.Add("User-Agent", client.UserAgent) + req.Header.Add(client.Credentials.HttpHeader()) + + return req, nil +} + +func (c *Client) get(path string, v interface{}) (*Response, error) { + return c.Do("GET", path, nil, v) +} + +func (c *Client) post(path string, payload, v interface{}) (*Response, error) { + return c.Do("POST", path, payload, v) +} + +func (c *Client) put(path string, payload, v interface{}) (*Response, error) { + return c.Do("PUT", path, payload, v) +} + +func (c *Client) delete(path string, payload interface{}) (*Response, error) { + return c.Do("DELETE", path, payload, nil) +} + +// Do sends an API request and returns the API response. +// The API response is JSON decoded and stored in the value pointed by v, +// or returned as an error if an API error has occurred. +// If v implements the io.Writer interface, the raw response body will be written to v, +// without attempting to decode it. +func (c *Client) Do(method, path string, payload, v interface{}) (*Response, error) { + req, err := c.NewRequest(method, path, payload) + if err != nil { + return nil, err + } + + res, err := c.HttpClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + + response := &Response{Response: res} + + err = CheckResponse(res) + if err != nil { + return response, err + } + + if v != nil { + if w, ok := v.(io.Writer); ok { + io.Copy(w, res.Body) + } else { + err = json.NewDecoder(res.Body).Decode(v) + } + } + + return response, err +} + +// A Response represents an API response. +type Response struct { + *http.Response +} + +// An ErrorResponse represents an error caused by an API request. +type ErrorResponse struct { + Response *http.Response // HTTP response that caused this error + Message string `json:"message"` // human-readable message +} + +// Error implements the error interface. +func (r *ErrorResponse) Error() string { + return fmt.Sprintf("%v %v: %d %v", + r.Response.Request.Method, r.Response.Request.URL, + r.Response.StatusCode, r.Message) +} + +// CheckResponse checks the API response for errors, and returns them if present. +// A response is considered an error if the status code is different than 2xx. Specific requests +// may have additional requirements, but this is sufficient in most of the cases. +func CheckResponse(r *http.Response) error { + if code := r.StatusCode; 200 <= code && code <= 299 { + return nil + } + + errorResponse := &ErrorResponse{Response: r} + err := json.NewDecoder(r.Body).Decode(errorResponse) + if err != nil { + return err + } + + return errorResponse +} + +// Date custom type. +type Date struct { + time.Time +} + +// UnmarshalJSON handles the deserialization of the custom Date type. 
+func (d *Date) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("date should be a string, got %s", data) + } + t, err := time.Parse("2006-01-02", s) + if err != nil { + return fmt.Errorf("invalid date: %v", err) + } + d.Time = t + return nil +} diff --git a/vendor/github.com/weppos/dnsimple-go/dnsimple/domains.go b/vendor/github.com/weppos/dnsimple-go/dnsimple/domains.go new file mode 100644 index 000000000000..317a7a79ab25 --- /dev/null +++ b/vendor/github.com/weppos/dnsimple-go/dnsimple/domains.go @@ -0,0 +1,121 @@ +package dnsimple + +import ( + "fmt" + "time" +) + +// DomainsService handles communication with the domain related +// methods of the DNSimple API. +// +// DNSimple API docs: http://developer.dnsimple.com/domains/ +type DomainsService struct { + client *Client +} + +type Domain struct { + Id int `json:"id,omitempty"` + UserId int `json:"user_id,omitempty"` + RegistrantId int `json:"registrant_id,omitempty"` + Name string `json:"name,omitempty"` + UnicodeName string `json:"unicode_name,omitempty"` + Token string `json:"token,omitempty"` + State string `json:"state,omitempty"` + Language string `json:"language,omitempty"` + Lockable bool `json:"lockable,omitempty"` + AutoRenew bool `json:"auto_renew,omitempty"` + WhoisProtected bool `json:"whois_protected,omitempty"` + RecordCount int `json:"record_count,omitempty"` + ServiceCount int `json:"service_count,omitempty"` + ExpiresOn *Date `json:"expires_on,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +type domainWrapper struct { + Domain Domain `json:"domain"` +} + +// domainRequest represents a generic wrapper for a domain request, +// when domainWrapper cannot be used because of type constraint on Domain. +type domainRequest struct { + Domain interface{} `json:"domain"` +} + +func domainIdentifier(value interface{}) string { + switch value := value.(type) { + case string: + return value + case int: + return fmt.Sprintf("%d", value) + } + return "" +} + +// domainPath generates the resource path for given domain. +func domainPath(domain interface{}) string { + if domain != nil { + return fmt.Sprintf("domains/%s", domainIdentifier(domain)) + } + return "domains" +} + +// List the domains. +// +// DNSimple API docs: http://developer.dnsimple.com/domains/#list +func (s *DomainsService) List() ([]Domain, *Response, error) { + path := domainPath(nil) + returnedDomains := []domainWrapper{} + + res, err := s.client.get(path, &returnedDomains) + if err != nil { + return []Domain{}, res, err + } + + domains := []Domain{} + for _, domain := range returnedDomains { + domains = append(domains, domain.Domain) + } + + return domains, res, nil +} + +// Create a new domain. +// +// DNSimple API docs: http://developer.dnsimple.com/domains/#create +func (s *DomainsService) Create(domainAttributes Domain) (Domain, *Response, error) { + path := domainPath(nil) + wrappedDomain := domainWrapper{Domain: domainAttributes} + returnedDomain := domainWrapper{} + + res, err := s.client.post(path, wrappedDomain, &returnedDomain) + if err != nil { + return Domain{}, res, err + } + + return returnedDomain.Domain, res, nil +} + +// Get fetches a domain. 
+// +// DNSimple API docs: http://developer.dnsimple.com/domains/#get +func (s *DomainsService) Get(domain interface{}) (Domain, *Response, error) { + path := domainPath(domain) + returnedDomain := domainWrapper{} + + res, err := s.client.get(path, &returnedDomain) + if err != nil { + return Domain{}, res, err + } + + return returnedDomain.Domain, res, nil +} + +// Delete a domain. +// +// DNSimple API docs: http://developer.dnsimple.com/domains/#delete +func (s *DomainsService) Delete(domain interface{}) (*Response, error) { + path := domainPath(domain) + + return s.client.delete(path, nil) +} diff --git a/vendor/github.com/weppos/dnsimple-go/dnsimple/domains_records.go b/vendor/github.com/weppos/dnsimple-go/dnsimple/domains_records.go new file mode 100644 index 000000000000..8a6e35b23b2b --- /dev/null +++ b/vendor/github.com/weppos/dnsimple-go/dnsimple/domains_records.go @@ -0,0 +1,136 @@ +package dnsimple + +import ( + "fmt" + "net/url" + "time" +) + +type Record struct { + Id int `json:"id,omitempty"` + DomainId int `json:"domain_id,omitempty"` + Name string `json:"name,omitempty"` + Content string `json:"content,omitempty"` + TTL int `json:"ttl,omitempty"` + Priority int `json:"prio,omitempty"` + Type string `json:"record_type,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +type recordWrapper struct { + Record Record `json:"record"` +} + +// recordPath generates the resource path for given record that belongs to a domain. +func recordPath(domain interface{}, record interface{}) string { + path := fmt.Sprintf("domains/%s/records", domainIdentifier(domain)) + + if record != nil { + path += fmt.Sprintf("/%d", record) + } + + return path +} + +// List the domain records. +// +// DNSimple API docs: http://developer.dnsimple.com/domains/records/#list +func (s *DomainsService) ListRecords(domain interface{}, recordName, recordType string) ([]Record, *Response, error) { + reqStr := recordPath(domain, nil) + v := url.Values{} + + if recordName != "" { + v.Add("name", recordName) + } + if recordType != "" { + v.Add("type", recordType) + } + reqStr += "?" + v.Encode() + + wrappedRecords := []recordWrapper{} + + res, err := s.client.get(reqStr, &wrappedRecords) + if err != nil { + return []Record{}, res, err + } + + records := []Record{} + for _, record := range wrappedRecords { + records = append(records, record.Record) + } + + return records, res, nil +} + +// CreateRecord creates a domain record. +// +// DNSimple API docs: http://developer.dnsimple.com/domains/records/#create +func (s *DomainsService) CreateRecord(domain interface{}, recordAttributes Record) (Record, *Response, error) { + path := recordPath(domain, nil) + wrappedRecord := recordWrapper{Record: recordAttributes} + returnedRecord := recordWrapper{} + + res, err := s.client.post(path, wrappedRecord, &returnedRecord) + if err != nil { + return Record{}, res, err + } + + return returnedRecord.Record, res, nil +} + +// GetRecord fetches the domain record. +// +// DNSimple API docs: http://developer.dnsimple.com/domains/records/#get +func (s *DomainsService) GetRecord(domain interface{}, recordID int) (Record, *Response, error) { + path := recordPath(domain, recordID) + wrappedRecord := recordWrapper{} + + res, err := s.client.get(path, &wrappedRecord) + if err != nil { + return Record{}, res, err + } + + return wrappedRecord.Record, res, nil +} + +// UpdateRecord updates a domain record. 
+// +// DNSimple API docs: http://developer.dnsimple.com/domains/records/#update +func (s *DomainsService) UpdateRecord(domain interface{}, recordID int, recordAttributes Record) (Record, *Response, error) { + path := recordPath(domain, recordID) + // name, content, ttl, priority + wrappedRecord := recordWrapper{ + Record: Record{ + Name: recordAttributes.Name, + Content: recordAttributes.Content, + TTL: recordAttributes.TTL, + Priority: recordAttributes.Priority}} + returnedRecord := recordWrapper{} + + res, err := s.client.put(path, wrappedRecord, &returnedRecord) + if err != nil { + return Record{}, res, err + } + + return returnedRecord.Record, res, nil +} + +// DeleteRecord deletes a domain record. +// +// DNSimple API docs: http://developer.dnsimple.com/domains/records/#delete +func (s *DomainsService) DeleteRecord(domain interface{}, recordID int) (*Response, error) { + path := recordPath(domain, recordID) + + return s.client.delete(path, nil) +} + +// UpdateIP updates the IP of specific A record. +// +// This is not part of the standard API. However, +// this is useful for Dynamic DNS (DDNS or DynDNS). +func (record *Record) UpdateIP(client *Client, IP string) error { + newRecord := Record{Content: IP, Name: record.Name} + _, _, err := client.Domains.UpdateRecord(record.DomainId, record.Id, newRecord) + return err +} diff --git a/vendor/github.com/weppos/dnsimple-go/dnsimple/domains_zones.go b/vendor/github.com/weppos/dnsimple-go/dnsimple/domains_zones.go new file mode 100644 index 000000000000..8765bfde6393 --- /dev/null +++ b/vendor/github.com/weppos/dnsimple-go/dnsimple/domains_zones.go @@ -0,0 +1,24 @@ +package dnsimple + +import ( + "fmt" +) + +type zoneResponse struct { + Zone string `json:"zone,omitempty"` +} + +// GetZone downloads the Bind-like zone file. +// +// DNSimple API docs: http://developer.dnsimple.com/domains/zones/#get +func (s *DomainsService) GetZone(domain interface{}) (string, *Response, error) { + path := fmt.Sprintf("%s/zone", domainPath(domain)) + zoneResponse := zoneResponse{} + + res, err := s.client.get(path, &zoneResponse) + if err != nil { + return "", res, err + } + + return zoneResponse.Zone, res, nil +} diff --git a/vendor/github.com/weppos/dnsimple-go/dnsimple/registrar.go b/vendor/github.com/weppos/dnsimple-go/dnsimple/registrar.go new file mode 100644 index 000000000000..38cd85ea9a1d --- /dev/null +++ b/vendor/github.com/weppos/dnsimple-go/dnsimple/registrar.go @@ -0,0 +1,131 @@ +package dnsimple + +import ( + "fmt" +) + +// RegistrarService handles communication with the registrar related +// methods of the DNSimple API. +// +// DNSimple API docs: http://developer.dnsimple.com/registrar/ +type RegistrarService struct { + client *Client +} + +// ExtendedAttributes maps the additional attributes required by some registries. +type ExtendedAttributes map[string]string + +// ExtendedAttributes represents the transfer information. +type TransferOrder struct { + AuthCode string `json:"authinfo,omitempty"` +} + +// registrationRequest represents the body of a register or transfer request. +type registrationRequest struct { + Domain Domain `json:"domain"` + ExtendedAttributes *ExtendedAttributes `json:"extended_attribute,omitempty"` + TransferOrder *TransferOrder `json:"transfer_order,omitempty"` +} + +// IsAvailable checks if the domain is available or registered. 
+// +// See: http://developer.dnsimple.com/registrar/#check +func (s *RegistrarService) IsAvailable(domain string) (bool, error) { + path := fmt.Sprintf("%s/check", domainPath(domain)) + + res, err := s.client.get(path, nil) + if err != nil && res != nil && res.StatusCode != 404 { + return false, err + } + + return res.StatusCode == 404, nil +} + +// Register a domain. +// +// DNSimple API docs: http://developer.dnsimple.com/registrar/#register +func (s *RegistrarService) Register(domain string, registrantID int, extendedAttributes *ExtendedAttributes) (Domain, *Response, error) { + request := registrationRequest{ + Domain: Domain{Name: domain, RegistrantId: registrantID}, + ExtendedAttributes: extendedAttributes, + } + returnedDomain := domainWrapper{} + + res, err := s.client.post("domain_registrations", request, &returnedDomain) + if err != nil { + return Domain{}, res, err + } + + return returnedDomain.Domain, res, nil +} + +// Transfer a domain. +// +// DNSimple API docs: http://developer.dnsimple.com/registrar/#transfer +func (s *RegistrarService) Transfer(domain string, registrantID int, authCode string, extendedAttributes *ExtendedAttributes) (Domain, *Response, error) { + request := registrationRequest{ + Domain: Domain{Name: domain, RegistrantId: registrantID}, + ExtendedAttributes: extendedAttributes, + TransferOrder: &TransferOrder{AuthCode: authCode}, + } + returnedDomain := domainWrapper{} + + res, err := s.client.post("domain_transfers", request, &returnedDomain) + if err != nil { + return Domain{}, res, err + } + + return returnedDomain.Domain, res, nil +} + +// renewDomain represents the body of a Renew request. +type renewDomain struct { + Name string `json:"name,omitempty"` + RenewWhoisPrivacy bool `json:"renew_whois_privacy,omitempty"` +} + +// Renew the domain, optionally renewing WHOIS privacy service. +// +// DNSimple API docs: http://developer.dnsimple.com/registrar/#renew +func (s *RegistrarService) Renew(domain string, renewWhoisPrivacy bool) (Domain, *Response, error) { + request := domainRequest{Domain: renewDomain{ + Name: domain, + RenewWhoisPrivacy: renewWhoisPrivacy, + }} + returnedDomain := domainWrapper{} + + res, err := s.client.post("domain_renewals", request, &returnedDomain) + if err != nil { + return Domain{}, res, err + } + + return returnedDomain.Domain, res, nil +} + +// EnableAutoRenewal enables the auto-renewal feature for the domain. +// +// DNSimple API docs: http://developer.dnsimple.com/registrar/autorenewal/#enable +func (s *RegistrarService) EnableAutoRenewal(domain interface{}) (*Response, error) { + path := fmt.Sprintf("%s/auto_renewal", domainPath(domain)) + + res, err := s.client.post(path, nil, nil) + if err != nil { + return res, err + } + + return res, nil +} + +// DisableAutoRenewal disables the auto-renewal feature for the domain. 
+// +// DNSimple API docs: http://developer.dnsimple.com/registrar/autorenewal/#disable +func (s *RegistrarService) DisableAutoRenewal(domain interface{}) (*Response, error) { + path := fmt.Sprintf("%s/auto_renewal", domainPath(domain)) + + res, err := s.client.delete(path, nil) + if err != nil { + return res, err + } + + return res, nil +} diff --git a/vendor/github.com/weppos/dnsimple-go/dnsimple/users.go b/vendor/github.com/weppos/dnsimple-go/dnsimple/users.go new file mode 100644 index 000000000000..601fb09f9918 --- /dev/null +++ b/vendor/github.com/weppos/dnsimple-go/dnsimple/users.go @@ -0,0 +1,35 @@ +package dnsimple + +import () + +// UsersService handles communication with the uer related +// methods of the DNSimple API. +// +// DNSimple API docs: http://developer.dnsimple.com/users/ +type UsersService struct { + client *Client +} + +// User represents a DNSimple user. +type User struct { + Id int `json:"id,omitempty"` + Email string `json:"email,omitempty"` +} + +type userWrapper struct { + User User `json:"user"` +} + +// User gets the logged in user. +// +// DNSimple API docs: http://developer.dnsimple.com/users/ +func (s *UsersService) User() (User, *Response, error) { + wrappedUser := userWrapper{} + + res, err := s.client.get("user", &wrappedUser) + if err != nil { + return User{}, res, err + } + + return wrappedUser.User, res, nil +} diff --git a/vendor/github.com/xenolf/lego/LICENSE b/vendor/github.com/xenolf/lego/LICENSE new file mode 100644 index 000000000000..17460b716630 --- /dev/null +++ b/vendor/github.com/xenolf/lego/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Sebastian Erhart + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/github.com/xenolf/lego/acme/challenges.go new file mode 100644 index 000000000000..857900507c08 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/challenges.go @@ -0,0 +1,16 @@ +package acme + +// Challenge is a string that identifies a particular type and version of ACME challenge. 
+type Challenge string + +const ( + // HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http + // Note: HTTP01ChallengePath returns the URL path to fulfill this challenge + HTTP01 = Challenge("http-01") + // TLSSNI01 is the "tls-sni-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#tls-with-server-name-indication-tls-sni + // Note: TLSSNI01ChallengeCert returns a certificate to fulfill this challenge + TLSSNI01 = Challenge("tls-sni-01") + // DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns + // Note: DNS01Record returns a DNS record which will fulfill this challenge + DNS01 = Challenge("dns-01") +) diff --git a/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/xenolf/lego/acme/client.go new file mode 100644 index 000000000000..d4febfb2476c --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/client.go @@ -0,0 +1,798 @@ +// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers. +package acme + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "regexp" + "strconv" + "strings" + "time" +) + +var ( + // Logger is an optional custom logger. + Logger *log.Logger +) + +// logf writes a log entry. It uses Logger if not +// nil, otherwise it uses the default log.Logger. +func logf(format string, args ...interface{}) { + if Logger != nil { + Logger.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// User interface is to be implemented by users of this library. +// It is used by the client type to get user specific information. +type User interface { + GetEmail() string + GetRegistration() *RegistrationResource + GetPrivateKey() crypto.PrivateKey +} + +// Interface for all challenge solvers to implement. +type solver interface { + Solve(challenge challenge, domain string) error +} + +type validateFunc func(j *jws, domain, uri string, chlng challenge) error + +// Client is the user-friendy way to ACME +type Client struct { + directory directory + user User + jws *jws + keyType KeyType + issuerCert []byte + solvers map[Challenge]solver +} + +// NewClient creates a new ACME client on behalf of the user. The client will depend on +// the ACME directory located at caDirURL for the rest of its actions. It will +// generate private keys for certificates of size keyBits. +func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) { + privKey := user.GetPrivateKey() + if privKey == nil { + return nil, errors.New("private key was nil") + } + + var dir directory + if _, err := getJSON(caDirURL, &dir); err != nil { + return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err) + } + + if dir.NewRegURL == "" { + return nil, errors.New("directory missing new registration URL") + } + if dir.NewAuthzURL == "" { + return nil, errors.New("directory missing new authz URL") + } + if dir.NewCertURL == "" { + return nil, errors.New("directory missing new certificate URL") + } + if dir.RevokeCertURL == "" { + return nil, errors.New("directory missing revoke certificate URL") + } + + jws := &jws{privKey: privKey, directoryURL: caDirURL} + + // REVIEW: best possibility? + // Add all available solvers with the right index as per ACME + // spec to this map. Otherwise they won`t be found. 
+ solvers := make(map[Challenge]solver) + solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}} + solvers[TLSSNI01] = &tlsSNIChallenge{jws: jws, validate: validate, provider: &TLSProviderServer{}} + + return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil +} + +// SetChallengeProvider specifies a custom provider that will make the solution available +func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error { + switch challenge { + case HTTP01: + c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p} + case TLSSNI01: + c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p} + case DNS01: + c.solvers[challenge] = &dnsChallenge{jws: c.jws, validate: validate, provider: p} + default: + return fmt.Errorf("Unknown challenge %v", challenge) + } + return nil +} + +// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges. +// If this option is not used, the default port 80 and all interfaces will be used. +// To only specify a port and no interface use the ":port" notation. +func (c *Client) SetHTTPAddress(iface string) error { + host, port, err := net.SplitHostPort(iface) + if err != nil { + return err + } + + if chlng, ok := c.solvers[HTTP01]; ok { + chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port) + } + + return nil +} + +// SetTLSAddress specifies a custom interface:port to be used for TLS based challenges. +// If this option is not used, the default port 443 and all interfaces will be used. +// To only specify a port and no interface use the ":port" notation. +func (c *Client) SetTLSAddress(iface string) error { + host, port, err := net.SplitHostPort(iface) + if err != nil { + return err + } + + if chlng, ok := c.solvers[TLSSNI01]; ok { + chlng.(*tlsSNIChallenge).provider = NewTLSProviderServer(host, port) + } + return nil +} + +// ExcludeChallenges explicitly removes challenges from the pool for solving. +func (c *Client) ExcludeChallenges(challenges []Challenge) { + // Loop through all challenges and delete the requested one if found. + for _, challenge := range challenges { + delete(c.solvers, challenge) + } +} + +// Register the current account to the ACME server. 
+func (c *Client) Register() (*RegistrationResource, error) { + if c == nil || c.user == nil { + return nil, errors.New("acme: cannot register a nil client or user") + } + logf("[INFO] acme: Registering account for %s", c.user.GetEmail()) + + regMsg := registrationMessage{ + Resource: "new-reg", + } + if c.user.GetEmail() != "" { + regMsg.Contact = []string{"mailto:" + c.user.GetEmail()} + } else { + regMsg.Contact = []string{} + } + + var serverReg Registration + var regURI string + hdr, err := postJSON(c.jws, c.directory.NewRegURL, regMsg, &serverReg) + if err != nil { + remoteErr, ok := err.(RemoteError) + if ok && remoteErr.StatusCode == 409 { + regURI = hdr.Get("Location") + regMsg = registrationMessage{ + Resource: "reg", + } + if hdr, err = postJSON(c.jws, regURI, regMsg, &serverReg); err != nil { + return nil, err + } + } else { + return nil, err + } + } + + reg := &RegistrationResource{Body: serverReg} + + links := parseLinks(hdr["Link"]) + + if regURI == "" { + regURI = hdr.Get("Location") + } + reg.URI = regURI + if links["terms-of-service"] != "" { + reg.TosURL = links["terms-of-service"] + } + + if links["next"] != "" { + reg.NewAuthzURL = links["next"] + } else { + return nil, errors.New("acme: The server did not return 'next' link to proceed") + } + + return reg, nil +} + +// DeleteRegistration deletes the client's user registration from the ACME +// server. +func (c *Client) DeleteRegistration() error { + if c == nil || c.user == nil { + return errors.New("acme: cannot unregister a nil client or user") + } + logf("[INFO] acme: Deleting account for %s", c.user.GetEmail()) + + regMsg := registrationMessage{ + Resource: "reg", + Delete: true, + } + + _, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, nil) + if err != nil { + return err + } + + return nil +} + +// QueryRegistration runs a POST request on the client's registration and +// returns the result. +// +// This is similar to the Register function, but acting on an existing +// registration link and resource. +func (c *Client) QueryRegistration() (*RegistrationResource, error) { + if c == nil || c.user == nil { + return nil, errors.New("acme: cannot query the registration of a nil client or user") + } + // Log the URL here instead of the email as the email may not be set + logf("[INFO] acme: Querying account for %s", c.user.GetRegistration().URI) + + regMsg := registrationMessage{ + Resource: "reg", + } + + var serverReg Registration + hdr, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, &serverReg) + if err != nil { + return nil, err + } + + reg := &RegistrationResource{Body: serverReg} + + links := parseLinks(hdr["Link"]) + // Location: header is not returned so this needs to be populated off of + // existing URI + reg.URI = c.user.GetRegistration().URI + if links["terms-of-service"] != "" { + reg.TosURL = links["terms-of-service"] + } + + if links["next"] != "" { + reg.NewAuthzURL = links["next"] + } else { + return nil, errors.New("acme: No new-authz link in response to registration query") + } + + return reg, nil +} + +// AgreeToTOS updates the Client registration and sends the agreement to +// the server. +func (c *Client) AgreeToTOS() error { + reg := c.user.GetRegistration() + + reg.Body.Agreement = c.user.GetRegistration().TosURL + reg.Body.Resource = "reg" + _, err := postJSON(c.jws, c.user.GetRegistration().URI, c.user.GetRegistration().Body, nil) + return err +} + +// ObtainCertificateForCSR tries to obtain a certificate matching the CSR passed into it. 
+// The domains are inferred from the CommonName and SubjectAltNames, if any. The private key +// for this CSR is not required. +// If bundle is true, the []byte contains both the issuer certificate and +// your issued certificate as a bundle. +// This function will never return a partial certificate. If one domain in the list fails, +// the whole certificate will fail. +func (c *Client) ObtainCertificateForCSR(csr x509.CertificateRequest, bundle bool) (CertificateResource, map[string]error) { + // figure out what domains it concerns + // start with the common name + domains := []string{csr.Subject.CommonName} + + // loop over the SubjectAltName DNS names +DNSNames: + for _, sanName := range csr.DNSNames { + for _, existingName := range domains { + if existingName == sanName { + // duplicate; skip this name + continue DNSNames + } + } + + // name is unique + domains = append(domains, sanName) + } + + if bundle { + logf("[INFO][%s] acme: Obtaining bundled SAN certificate given a CSR", strings.Join(domains, ", ")) + } else { + logf("[INFO][%s] acme: Obtaining SAN certificate given a CSR", strings.Join(domains, ", ")) + } + + challenges, failures := c.getChallenges(domains) + // If any challenge fails - return. Do not generate partial SAN certificates. + if len(failures) > 0 { + return CertificateResource{}, failures + } + + errs := c.solveChallenges(challenges) + // If any challenge fails - return. Do not generate partial SAN certificates. + if len(errs) > 0 { + return CertificateResource{}, errs + } + + logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) + + cert, err := c.requestCertificateForCsr(challenges, bundle, csr.Raw, nil) + if err != nil { + for _, chln := range challenges { + failures[chln.Domain] = err + } + } + + // Add the CSR to the certificate so that it can be used for renewals. + cert.CSR = pemEncode(&csr) + + return cert, failures +} + +// ObtainCertificate tries to obtain a single certificate using all domains passed into it. +// The first domain in domains is used for the CommonName field of the certificate, all other +// domains are added using the Subject Alternate Names extension. A new private key is generated +// for every invocation of this function. If you do not want that you can supply your own private key +// in the privKey parameter. If this parameter is non-nil it will be used instead of generating a new one. +// If bundle is true, the []byte contains both the issuer certificate and +// your issued certificate as a bundle. +// This function will never return a partial certificate. If one domain in the list fails, +// the whole certificate will fail. +func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey) (CertificateResource, map[string]error) { + if bundle { + logf("[INFO][%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", ")) + } else { + logf("[INFO][%s] acme: Obtaining SAN certificate", strings.Join(domains, ", ")) + } + + challenges, failures := c.getChallenges(domains) + // If any challenge fails - return. Do not generate partial SAN certificates. + if len(failures) > 0 { + return CertificateResource{}, failures + } + + errs := c.solveChallenges(challenges) + // If any challenge fails - return. Do not generate partial SAN certificates. 
+ if len(errs) > 0 { + return CertificateResource{}, errs + } + + logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) + + cert, err := c.requestCertificate(challenges, bundle, privKey) + if err != nil { + for _, chln := range challenges { + failures[chln.Domain] = err + } + } + + return cert, failures +} + +// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA. +func (c *Client) RevokeCertificate(certificate []byte) error { + certificates, err := parsePEMBundle(certificate) + if err != nil { + return err + } + + x509Cert := certificates[0] + if x509Cert.IsCA { + return fmt.Errorf("Certificate bundle starts with a CA certificate") + } + + encodedCert := base64.URLEncoding.EncodeToString(x509Cert.Raw) + + _, err = postJSON(c.jws, c.directory.RevokeCertURL, revokeCertMessage{Resource: "revoke-cert", Certificate: encodedCert}, nil) + return err +} + +// RenewCertificate takes a CertificateResource and tries to renew the certificate. +// If the renewal process succeeds, the new certificate will ge returned in a new CertResource. +// Please be aware that this function will return a new certificate in ANY case that is not an error. +// If the server does not provide us with a new cert on a GET request to the CertURL +// this function will start a new-cert flow where a new certificate gets generated. +// If bundle is true, the []byte contains both the issuer certificate and +// your issued certificate as a bundle. +// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil. +func (c *Client) RenewCertificate(cert CertificateResource, bundle bool) (CertificateResource, error) { + // Input certificate is PEM encoded. Decode it here as we may need the decoded + // cert later on in the renewal process. The input may be a bundle or a single certificate. + certificates, err := parsePEMBundle(cert.Certificate) + if err != nil { + return CertificateResource{}, err + } + + x509Cert := certificates[0] + if x509Cert.IsCA { + return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain) + } + + // This is just meant to be informal for the user. + timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC()) + logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours())) + + // The first step of renewal is to check if we get a renewed cert + // directly from the cert URL. + resp, err := httpGet(cert.CertURL) + if err != nil { + return CertificateResource{}, err + } + defer resp.Body.Close() + serverCertBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return CertificateResource{}, err + } + + serverCert, err := x509.ParseCertificate(serverCertBytes) + if err != nil { + return CertificateResource{}, err + } + + // If the server responds with a different certificate we are effectively renewed. + // TODO: Further test if we can actually use the new certificate (Our private key works) + if !x509Cert.Equal(serverCert) { + logf("[INFO][%s] acme: Server responded with renewed certificate", cert.Domain) + issuedCert := pemEncode(derCertificateBytes(serverCertBytes)) + // If bundle is true, we want to return a certificate bundle. + // To do this, we need the issuer certificate. + if bundle { + // The issuer certificate link is always supplied via an "up" link + // in the response headers of a new certificate. 
+ links := parseLinks(resp.Header["Link"]) + issuerCert, err := c.getIssuerCertificate(links["up"]) + if err != nil { + // If we fail to acquire the issuer cert, return the issued certificate - do not fail. + logf("[ERROR][%s] acme: Could not bundle issuer certificate: %v", cert.Domain, err) + } else { + // Success - append the issuer cert to the issued cert. + issuerCert = pemEncode(derCertificateBytes(issuerCert)) + issuedCert = append(issuedCert, issuerCert...) + } + } + + cert.Certificate = issuedCert + return cert, nil + } + + // If the certificate is the same, then we need to request a new certificate. + // Start by checking to see if the certificate was based off a CSR, and + // use that if it's defined. + if len(cert.CSR) > 0 { + csr, err := pemDecodeTox509CSR(cert.CSR) + if err != nil { + return CertificateResource{}, err + } + newCert, failures := c.ObtainCertificateForCSR(*csr, bundle) + return newCert, failures[cert.Domain] + } + + var privKey crypto.PrivateKey + if cert.PrivateKey != nil { + privKey, err = parsePEMPrivateKey(cert.PrivateKey) + if err != nil { + return CertificateResource{}, err + } + } + + var domains []string + var failures map[string]error + // check for SAN certificate + if len(x509Cert.DNSNames) > 1 { + domains = append(domains, x509Cert.Subject.CommonName) + for _, sanDomain := range x509Cert.DNSNames { + if sanDomain == x509Cert.Subject.CommonName { + continue + } + domains = append(domains, sanDomain) + } + } else { + domains = append(domains, x509Cert.Subject.CommonName) + } + + newCert, failures := c.ObtainCertificate(domains, bundle, privKey) + return newCert, failures[cert.Domain] +} + +// Looks through the challenge combinations to find a solvable match. +// Then solves the challenges in series and returns. +func (c *Client) solveChallenges(challenges []authorizationResource) map[string]error { + // loop through the resources, basically through the domains. + failures := make(map[string]error) + for _, authz := range challenges { + // no solvers - no solving + if solvers := c.chooseSolvers(authz.Body, authz.Domain); solvers != nil { + for i, solver := range solvers { + // TODO: do not immediately fail if one domain fails to validate. + err := solver.Solve(authz.Body.Challenges[i], authz.Domain) + if err != nil { + failures[authz.Domain] = err + } + } + } else { + failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain) + } + } + + return failures +} + +// Checks all combinations from the server and returns an array of +// solvers which should get executed in series. +func (c *Client) chooseSolvers(auth authorization, domain string) map[int]solver { + for _, combination := range auth.Combinations { + solvers := make(map[int]solver) + for _, idx := range combination { + if solver, ok := c.solvers[auth.Challenges[idx].Type]; ok { + solvers[idx] = solver + } else { + logf("[INFO][%s] acme: Could not find solver for: %s", domain, auth.Challenges[idx].Type) + } + } + + // If we can solve the whole combination, return the solvers + if len(solvers) == len(combination) { + return solvers + } + } + return nil +} + +// Get the challenges needed to proof our identifier to the ACME server. 
+func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[string]error) { + resc, errc := make(chan authorizationResource), make(chan domainError) + + for _, domain := range domains { + go func(domain string) { + authMsg := authorization{Resource: "new-authz", Identifier: identifier{Type: "dns", Value: domain}} + var authz authorization + hdr, err := postJSON(c.jws, c.user.GetRegistration().NewAuthzURL, authMsg, &authz) + if err != nil { + errc <- domainError{Domain: domain, Error: err} + return + } + + links := parseLinks(hdr["Link"]) + if links["next"] == "" { + logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain) + return + } + + resc <- authorizationResource{Body: authz, NewCertURL: links["next"], AuthURL: hdr.Get("Location"), Domain: domain} + }(domain) + } + + responses := make(map[string]authorizationResource) + failures := make(map[string]error) + for i := 0; i < len(domains); i++ { + select { + case res := <-resc: + responses[res.Domain] = res + case err := <-errc: + failures[err.Domain] = err.Error + } + } + + challenges := make([]authorizationResource, 0, len(responses)) + for _, domain := range domains { + if challenge, ok := responses[domain]; ok { + challenges = append(challenges, challenge) + } + } + + close(resc) + close(errc) + + return challenges, failures +} + +func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey) (CertificateResource, error) { + if len(authz) == 0 { + return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!") + } + + var err error + if privKey == nil { + privKey, err = generatePrivateKey(c.keyType) + if err != nil { + return CertificateResource{}, err + } + } + + // determine certificate name(s) based on the authorization resources + commonName := authz[0] + var san []string + for _, auth := range authz[1:] { + san = append(san, auth.Domain) + } + + // TODO: should the CSR be customizable? + csr, err := generateCsr(privKey, commonName.Domain, san) + if err != nil { + return CertificateResource{}, err + } + + return c.requestCertificateForCsr(authz, bundle, csr, pemEncode(privKey)) +} + +func (c *Client) requestCertificateForCsr(authz []authorizationResource, bundle bool, csr []byte, privateKeyPem []byte) (CertificateResource, error) { + commonName := authz[0] + + var authURLs []string + for _, auth := range authz[1:] { + authURLs = append(authURLs, auth.AuthURL) + } + + csrString := base64.URLEncoding.EncodeToString(csr) + jsonBytes, err := json.Marshal(csrMessage{Resource: "new-cert", Csr: csrString, Authorizations: authURLs}) + if err != nil { + return CertificateResource{}, err + } + + resp, err := c.jws.post(commonName.NewCertURL, jsonBytes) + if err != nil { + return CertificateResource{}, err + } + + cerRes := CertificateResource{ + Domain: commonName.Domain, + CertURL: resp.Header.Get("Location"), + PrivateKey: privateKeyPem} + + for { + switch resp.StatusCode { + case 201, 202: + cert, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) + resp.Body.Close() + if err != nil { + return CertificateResource{}, err + } + + // The server returns a body with a length of zero if the + // certificate was not ready at the time this request completed. + // Otherwise the body is the certificate. 
+ if len(cert) > 0 { + + cerRes.CertStableURL = resp.Header.Get("Content-Location") + cerRes.AccountRef = c.user.GetRegistration().URI + + issuedCert := pemEncode(derCertificateBytes(cert)) + // If bundle is true, we want to return a certificate bundle. + // To do this, we need the issuer certificate. + if bundle { + // The issuer certificate link is always supplied via an "up" link + // in the response headers of a new certificate. + links := parseLinks(resp.Header["Link"]) + issuerCert, err := c.getIssuerCertificate(links["up"]) + if err != nil { + // If we fail to acquire the issuer cert, return the issued certificate - do not fail. + logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", commonName.Domain, err) + } else { + // Success - append the issuer cert to the issued cert. + issuerCert = pemEncode(derCertificateBytes(issuerCert)) + issuedCert = append(issuedCert, issuerCert...) + } + } + + cerRes.Certificate = issuedCert + logf("[INFO][%s] Server responded with a certificate.", commonName.Domain) + return cerRes, nil + } + + // The certificate was granted but is not yet issued. + // Check retry-after and loop. + ra := resp.Header.Get("Retry-After") + retryAfter, err := strconv.Atoi(ra) + if err != nil { + return CertificateResource{}, err + } + + logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", commonName.Domain, retryAfter) + time.Sleep(time.Duration(retryAfter) * time.Second) + + break + default: + return CertificateResource{}, handleHTTPError(resp) + } + + resp, err = httpGet(cerRes.CertURL) + if err != nil { + return CertificateResource{}, err + } + } +} + +// getIssuerCertificate requests the issuer certificate and caches it for +// subsequent requests. +func (c *Client) getIssuerCertificate(url string) ([]byte, error) { + logf("[INFO] acme: Requesting issuer cert from %s", url) + if c.issuerCert != nil { + return c.issuerCert, nil + } + + resp, err := httpGet(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) + if err != nil { + return nil, err + } + + _, err = x509.ParseCertificate(issuerBytes) + if err != nil { + return nil, err + } + + c.issuerCert = issuerBytes + return issuerBytes, err +} + +func parseLinks(links []string) map[string]string { + aBrkt := regexp.MustCompile("[<>]") + slver := regexp.MustCompile("(.+) *= *\"(.+)\"") + linkMap := make(map[string]string) + + for _, link := range links { + + link = aBrkt.ReplaceAllString(link, "") + parts := strings.Split(link, ";") + + matches := slver.FindStringSubmatch(parts[1]) + if len(matches) > 0 { + linkMap[matches[2]] = parts[0] + } + } + + return linkMap +} + +// validate makes the ACME server start validating a +// challenge response, only returning once it is done. +func validate(j *jws, domain, uri string, chlng challenge) error { + var challengeResponse challenge + + hdr, err := postJSON(j, uri, chlng, &challengeResponse) + if err != nil { + return err + } + + // After the path is sent, the ACME server will access our server. + // Repeatedly check the server for an updated status on our request. 
+ for { + switch challengeResponse.Status { + case "valid": + logf("[INFO][%s] The server validated our request", domain) + return nil + case "pending": + break + case "invalid": + return handleChallengeError(challengeResponse) + default: + return errors.New("The server returned an unexpected state.") + } + + ra, err := strconv.Atoi(hdr.Get("Retry-After")) + if err != nil { + // The ACME server MUST return a Retry-After. + // If it doesn't, we'll just poll hard. + ra = 1 + } + time.Sleep(time.Duration(ra) * time.Second) + + hdr, err = getJSON(uri, &challengeResponse) + if err != nil { + return err + } + } +} diff --git a/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/xenolf/lego/acme/crypto.go new file mode 100644 index 000000000000..33240b61aac6 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/crypto.go @@ -0,0 +1,339 @@ +package acme + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net/http" + "strings" + "time" + + "golang.org/x/crypto/ocsp" +) + +// KeyType represents the key algo as well as the key size or curve to use. +type KeyType string +type derCertificateBytes []byte + +// Constants for all key types we support. +const ( + EC256 = KeyType("P256") + EC384 = KeyType("P384") + RSA2048 = KeyType("2048") + RSA4096 = KeyType("4096") + RSA8192 = KeyType("8192") +) + +const ( + // OCSPGood means that the certificate is valid. + OCSPGood = ocsp.Good + // OCSPRevoked means that the certificate has been deliberately revoked. + OCSPRevoked = ocsp.Revoked + // OCSPUnknown means that the OCSP responder doesn't know about the certificate. + OCSPUnknown = ocsp.Unknown + // OCSPServerFailed means that the OCSP responder failed to process the request. + OCSPServerFailed = ocsp.ServerFailed +) + +// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response, +// the parsed response, and an error, if any. The returned []byte can be passed directly +// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the +// issued certificate, this function will try to get the issuer certificate from the +// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return +// values are nil, the OCSP status may be assumed OCSPUnknown. +func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) { + certificates, err := parsePEMBundle(bundle) + if err != nil { + return nil, nil, err + } + + // We expect the certificate slice to be ordered downwards the chain. + // SRV CRT -> CA. We need to pull the leaf and issuer certs out of it, + // which should always be the first two certificates. If there's no + // OCSP server listed in the leaf cert, there's nothing to do. And if + // we have only one certificate so far, we need to get the issuer cert. + issuedCert := certificates[0] + if len(issuedCert.OCSPServer) == 0 { + return nil, nil, errors.New("no OCSP server specified in cert") + } + if len(certificates) == 1 { + // TODO: build fallback. If this fails, check the remaining array entries. 
+ if len(issuedCert.IssuingCertificateURL) == 0 { + return nil, nil, errors.New("no issuing certificate URL") + } + + resp, err := httpGet(issuedCert.IssuingCertificateURL[0]) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) + if err != nil { + return nil, nil, err + } + + issuerCert, err := x509.ParseCertificate(issuerBytes) + if err != nil { + return nil, nil, err + } + + // Insert it into the slice on position 0 + // We want it ordered right SRV CRT -> CA + certificates = append(certificates, issuerCert) + } + issuerCert := certificates[1] + + // Finally kick off the OCSP request. + ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil) + if err != nil { + return nil, nil, err + } + + reader := bytes.NewReader(ocspReq) + req, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader) + if err != nil { + return nil, nil, err + } + defer req.Body.Close() + + ocspResBytes, err := ioutil.ReadAll(limitReader(req.Body, 1024*1024)) + ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert) + if err != nil { + return nil, nil, err + } + + if ocspRes.Certificate == nil { + err = ocspRes.CheckSignatureFrom(issuerCert) + if err != nil { + return nil, nil, err + } + } + + return ocspResBytes, ocspRes, nil +} + +func getKeyAuthorization(token string, key interface{}) (string, error) { + var publicKey crypto.PublicKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + publicKey = k.Public() + case *rsa.PrivateKey: + publicKey = k.Public() + } + + // Generate the Key Authorization for the challenge + jwk := keyAsJWK(publicKey) + if jwk == nil { + return "", errors.New("Could not generate JWK from key.") + } + thumbBytes, err := jwk.Thumbprint(crypto.SHA256) + if err != nil { + return "", err + } + + // unpad the base64URL + keyThumb := base64.URLEncoding.EncodeToString(thumbBytes) + index := strings.Index(keyThumb, "=") + if index != -1 { + keyThumb = keyThumb[:index] + } + + return token + "." + keyThumb, nil +} + +// parsePEMBundle parses a certificate bundle from top to bottom and returns +// a slice of x509 certificates. This function will error if no certificates are found. 
+func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) { + var certificates []*x509.Certificate + var certDERBlock *pem.Block + + for { + certDERBlock, bundle = pem.Decode(bundle) + if certDERBlock == nil { + break + } + + if certDERBlock.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(certDERBlock.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } + } + + if len(certificates) == 0 { + return nil, errors.New("No certificates were found while parsing the bundle.") + } + + return certificates, nil +} + +func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) { + keyBlock, _ := pem.Decode(key) + + switch keyBlock.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(keyBlock.Bytes) + default: + return nil, errors.New("Unknown PEM header value") + } +} + +func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) { + + switch keyType { + case EC256: + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case EC384: + return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + case RSA2048: + return rsa.GenerateKey(rand.Reader, 2048) + case RSA4096: + return rsa.GenerateKey(rand.Reader, 4096) + case RSA8192: + return rsa.GenerateKey(rand.Reader, 8192) + } + + return nil, fmt.Errorf("Invalid KeyType: %s", keyType) +} + +func generateCsr(privateKey crypto.PrivateKey, domain string, san []string) ([]byte, error) { + template := x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: domain, + }, + } + + if len(san) > 0 { + template.DNSNames = san + } + + return x509.CreateCertificateRequest(rand.Reader, &template, privateKey) +} + +func pemEncode(data interface{}) []byte { + var pemBlock *pem.Block + switch key := data.(type) { + case *ecdsa.PrivateKey: + keyBytes, _ := x509.MarshalECPrivateKey(key) + pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} + case *rsa.PrivateKey: + pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)} + break + case *x509.CertificateRequest: + pemBlock = &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: key.Raw} + break + case derCertificateBytes: + pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(derCertificateBytes))} + } + + return pem.EncodeToMemory(pemBlock) +} + +func pemDecode(data []byte) (*pem.Block, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?") + } + + return pemBlock, nil +} + +func pemDecodeTox509(pem []byte) (*x509.Certificate, error) { + pemBlock, err := pemDecode(pem) + if pemBlock == nil { + return nil, err + } + + return x509.ParseCertificate(pemBlock.Bytes) +} + +func pemDecodeTox509CSR(pem []byte) (*x509.CertificateRequest, error) { + pemBlock, err := pemDecode(pem) + if pemBlock == nil { + return nil, err + } + + if pemBlock.Type != "CERTIFICATE REQUEST" { + return nil, fmt.Errorf("PEM block is not a certificate request") + } + + return x509.ParseCertificateRequest(pemBlock.Bytes) +} + +// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate. +// The certificate has to be PEM encoded. Any other encodings like DER will fail. 
+func GetPEMCertExpiration(cert []byte) (time.Time, error) { + pemBlock, err := pemDecode(cert) + if pemBlock == nil { + return time.Time{}, err + } + + return getCertExpiration(pemBlock.Bytes) +} + +// getCertExpiration returns the "NotAfter" date of a DER encoded certificate. +func getCertExpiration(cert []byte) (time.Time, error) { + pCert, err := x509.ParseCertificate(cert) + if err != nil { + return time.Time{}, err + } + + return pCert.NotAfter, nil +} + +func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) { + derBytes, err := generateDerCert(privKey, time.Time{}, domain) + if err != nil { + return nil, err + } + + return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil +} + +func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) { + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, err + } + + if expiration.IsZero() { + expiration = time.Now().Add(365) + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: "ACME Challenge TEMP", + }, + NotBefore: time.Now(), + NotAfter: expiration, + + KeyUsage: x509.KeyUsageKeyEncipherment, + BasicConstraintsValid: true, + DNSNames: []string{domain}, + } + + return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey) +} + +func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser { + return http.MaxBytesReader(nil, rd, numBytes) +} diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge.go b/vendor/github.com/xenolf/lego/acme/dns_challenge.go new file mode 100644 index 000000000000..8eb541db832d --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/dns_challenge.go @@ -0,0 +1,279 @@ +package acme + +import ( + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "log" + "net" + "strings" + "time" + + "github.com/miekg/dns" + "golang.org/x/net/publicsuffix" +) + +type preCheckDNSFunc func(fqdn, value string) (bool, error) + +var ( + preCheckDNS preCheckDNSFunc = checkDNSPropagation + fqdnToZone = map[string]string{} +) + +var RecursiveNameservers = []string{ + "google-public-dns-a.google.com:53", + "google-public-dns-b.google.com:53", +} + +// DNSTimeout is used to override the default DNS timeout of 10 seconds. 
+var DNSTimeout = 10 * time.Second + +// DNS01Record returns a DNS record which will fulfill the `dns-01` challenge +func DNS01Record(domain, keyAuth string) (fqdn string, value string, ttl int) { + keyAuthShaBytes := sha256.Sum256([]byte(keyAuth)) + // base64URL encoding without padding + keyAuthSha := base64.URLEncoding.EncodeToString(keyAuthShaBytes[:sha256.Size]) + value = strings.TrimRight(keyAuthSha, "=") + ttl = 5 + fqdn = fmt.Sprintf("_acme-challenge.%s.", domain) + return +} + +// dnsChallenge implements the dns-01 challenge according to ACME 7.5 +type dnsChallenge struct { + jws *jws + validate validateFunc + provider ChallengeProvider +} + +func (s *dnsChallenge) Solve(chlng challenge, domain string) error { + logf("[INFO][%s] acme: Trying to solve DNS-01", domain) + + if s.provider == nil { + return errors.New("No DNS Provider configured") + } + + // Generate the Key Authorization for the challenge + keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) + if err != nil { + return err + } + + err = s.provider.Present(domain, chlng.Token, keyAuth) + if err != nil { + return fmt.Errorf("Error presenting token: %s", err) + } + defer func() { + err := s.provider.CleanUp(domain, chlng.Token, keyAuth) + if err != nil { + log.Printf("Error cleaning up %s: %v ", domain, err) + } + }() + + fqdn, value, _ := DNS01Record(domain, keyAuth) + + logf("[INFO][%s] Checking DNS record propagation...", domain) + + var timeout, interval time.Duration + switch provider := s.provider.(type) { + case ChallengeProviderTimeout: + timeout, interval = provider.Timeout() + default: + timeout, interval = 60*time.Second, 2*time.Second + } + + err = WaitFor(timeout, interval, func() (bool, error) { + return preCheckDNS(fqdn, value) + }) + if err != nil { + return err + } + + return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) +} + +// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers. +func checkDNSPropagation(fqdn, value string) (bool, error) { + // Initial attempt to resolve at the recursive NS + r, err := dnsQuery(fqdn, dns.TypeTXT, RecursiveNameservers, true) + if err != nil { + return false, err + } + if r.Rcode == dns.RcodeSuccess { + // If we see a CNAME here then use the alias + for _, rr := range r.Answer { + if cn, ok := rr.(*dns.CNAME); ok { + if cn.Hdr.Name == fqdn { + fqdn = cn.Target + break + } + } + } + } + + authoritativeNss, err := lookupNameservers(fqdn) + if err != nil { + return false, err + } + + return checkAuthoritativeNss(fqdn, value, authoritativeNss) +} + +// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record. 
+func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) { + for _, ns := range nameservers { + r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false) + if err != nil { + return false, err + } + + if r.Rcode != dns.RcodeSuccess { + return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn) + } + + var found bool + for _, rr := range r.Answer { + if txt, ok := rr.(*dns.TXT); ok { + if strings.Join(txt.Txt, "") == value { + found = true + break + } + } + } + + if !found { + return false, fmt.Errorf("NS %s did not return the expected TXT record", ns) + } + } + + return true, nil +} + +// dnsQuery will query a nameserver, iterating through the supplied servers as it retries +// The nameserver should include a port, to facilitate testing where we talk to a mock dns server. +func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (in *dns.Msg, err error) { + m := new(dns.Msg) + m.SetQuestion(fqdn, rtype) + m.SetEdns0(4096, false) + + if !recursive { + m.RecursionDesired = false + } + + // Will retry the request based on the number of servers (n+1) + for i := 1; i <= len(nameservers)+1; i++ { + ns := nameservers[i%len(nameservers)] + udp := &dns.Client{Net: "udp", Timeout: DNSTimeout} + in, _, err = udp.Exchange(m, ns) + + if err == dns.ErrTruncated { + tcp := &dns.Client{Net: "tcp", Timeout: DNSTimeout} + // If the TCP request suceeds, the err will reset to nil + in, _, err = tcp.Exchange(m, ns) + } + + if err == nil { + break + } + } + return +} + +// lookupNameservers returns the authoritative nameservers for the given fqdn. +func lookupNameservers(fqdn string) ([]string, error) { + var authoritativeNss []string + + zone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) + if err != nil { + return nil, err + } + + r, err := dnsQuery(zone, dns.TypeNS, RecursiveNameservers, true) + if err != nil { + return nil, err + } + + for _, rr := range r.Answer { + if ns, ok := rr.(*dns.NS); ok { + authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns)) + } + } + + if len(authoritativeNss) > 0 { + return authoritativeNss, nil + } + return nil, fmt.Errorf("Could not determine authoritative nameservers") +} + +// FindZoneByFqdn determines the zone of the given fqdn +func FindZoneByFqdn(fqdn string, nameservers []string) (string, error) { + // Do we have it cached? + if zone, ok := fqdnToZone[fqdn]; ok { + return zone, nil + } + + // Query the authoritative nameserver for a hopefully non-existing SOA record, + // in the authority section of the reply it will have the SOA of the + // containing zone. 
rfc2308 has this to say on the subject: + // Name servers authoritative for a zone MUST include the SOA record of + // the zone in the authority section of the response when reporting an + // NXDOMAIN or indicating that no data (NODATA) of the requested type exists + in, err := dnsQuery(fqdn, dns.TypeSOA, nameservers, true) + if err != nil { + return "", err + } + if in.Rcode != dns.RcodeNameError { + if in.Rcode != dns.RcodeSuccess { + return "", fmt.Errorf("The NS returned %s for %s", dns.RcodeToString[in.Rcode], fqdn) + } + // We have a success, so one of the answers has to be a SOA RR + for _, ans := range in.Answer { + if soa, ok := ans.(*dns.SOA); ok { + return checkIfTLD(fqdn, soa) + } + } + // Or it is NODATA, fall through to NXDOMAIN + } + // Search the authority section for our precious SOA RR + for _, ns := range in.Ns { + if soa, ok := ns.(*dns.SOA); ok { + return checkIfTLD(fqdn, soa) + } + } + return "", fmt.Errorf("The NS did not return the expected SOA record in the authority section") +} + +func checkIfTLD(fqdn string, soa *dns.SOA) (string, error) { + zone := soa.Hdr.Name + // If we ended up on one of the TLDs, it means the domain did not exist. + publicsuffix, _ := publicsuffix.PublicSuffix(UnFqdn(zone)) + if publicsuffix == UnFqdn(zone) { + return "", fmt.Errorf("Could not determine zone authoritatively") + } + fqdnToZone[fqdn] = zone + return zone, nil +} + +// ClearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing. +func ClearFqdnCache() { + fqdnToZone = map[string]string{} +} + +// ToFqdn converts the name into a fqdn appending a trailing dot. +func ToFqdn(name string) string { + n := len(name) + if n == 0 || name[n-1] == '.' { + return name + } + return name + "." +} + +// UnFqdn converts the fqdn into a name removing the trailing dot. +func UnFqdn(name string) string { + n := len(name) + if n != 0 && name[n-1] == '.' { + return name[:n-1] + } + return name +} diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go b/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go new file mode 100644 index 000000000000..240384e60b4b --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go @@ -0,0 +1,53 @@ +package acme + +import ( + "bufio" + "fmt" + "os" +) + +const ( + dnsTemplate = "%s %d IN TXT \"%s\"" +) + +// DNSProviderManual is an implementation of the ChallengeProvider interface +type DNSProviderManual struct{} + +// NewDNSProviderManual returns a DNSProviderManual instance. 
+func NewDNSProviderManual() (*DNSProviderManual, error) { + return &DNSProviderManual{}, nil +} + +// Present prints instructions for manually creating the TXT record +func (*DNSProviderManual) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := DNS01Record(domain, keyAuth) + dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, value) + + authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) + if err != nil { + return err + } + + logf("[INFO] acme: Please create the following TXT record in your %s zone:", authZone) + logf("[INFO] acme: %s", dnsRecord) + logf("[INFO] acme: Press 'Enter' when you are done") + + reader := bufio.NewReader(os.Stdin) + _, _ = reader.ReadString('\n') + return nil +} + +// CleanUp prints instructions for manually removing the TXT record +func (*DNSProviderManual) CleanUp(domain, token, keyAuth string) error { + fqdn, _, ttl := DNS01Record(domain, keyAuth) + dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, "...") + + authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) + if err != nil { + return err + } + + logf("[INFO] acme: You can now remove this TXT record from your %s zone:", authZone) + logf("[INFO] acme: %s", dnsRecord) + return nil +} diff --git a/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/xenolf/lego/acme/error.go new file mode 100644 index 000000000000..2aa690b33516 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/error.go @@ -0,0 +1,86 @@ +package acme + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" +) + +const ( + tosAgreementError = "Must agree to subscriber agreement before any further actions" +) + +// RemoteError is the base type for all errors specific to the ACME protocol. +type RemoteError struct { + StatusCode int `json:"status,omitempty"` + Type string `json:"type"` + Detail string `json:"detail"` +} + +func (e RemoteError) Error() string { + return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail) +} + +// TOSError represents the error which is returned if the user needs to +// accept the TOS. +// TODO: include the new TOS url if we can somehow obtain it. 
+type TOSError struct { + RemoteError +} + +type domainError struct { + Domain string + Error error +} + +type challengeError struct { + RemoteError + records []validationRecord +} + +func (c challengeError) Error() string { + + var errStr string + for _, validation := range c.records { + errStr = errStr + fmt.Sprintf("\tValidation for %s:%s\n\tResolved to:\n\t\t%s\n\tUsed: %s\n\n", + validation.Hostname, validation.Port, strings.Join(validation.ResolvedAddresses, "\n\t\t"), validation.UsedAddress) + } + + return fmt.Sprintf("%s\nError Detail:\n%s", c.RemoteError.Error(), errStr) +} + +func handleHTTPError(resp *http.Response) error { + var errorDetail RemoteError + + contenType := resp.Header.Get("Content-Type") + // try to decode the content as JSON + if contenType == "application/json" || contenType == "application/problem+json" { + decoder := json.NewDecoder(resp.Body) + err := decoder.Decode(&errorDetail) + if err != nil { + return err + } + } else { + detailBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) + if err != nil { + return err + } + + errorDetail.Detail = string(detailBytes) + } + + errorDetail.StatusCode = resp.StatusCode + + // Check for errors we handle specifically + if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError { + return TOSError{errorDetail} + } + + return errorDetail +} + +func handleChallengeError(chlng challenge) error { + return challengeError{chlng.Error, chlng.ValidationRecords} +} diff --git a/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/xenolf/lego/acme/http.go new file mode 100644 index 000000000000..3b5a37cbdfb4 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/http.go @@ -0,0 +1,120 @@ +package acme + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "runtime" + "strings" + "time" +) + +// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests. +var UserAgent string + +// HTTPTimeout is used to override the default HTTP timeout of 10 seconds. +var HTTPTimeout = 10 * time.Second + +// defaultClient is an HTTP client with a reasonable timeout value. +var defaultClient = http.Client{Timeout: HTTPTimeout} + +const ( + // defaultGoUserAgent is the Go HTTP package user agent string. Too + // bad it isn't exported. If it changes, we should update it here, too. + defaultGoUserAgent = "Go-http-client/1.1" + + // ourUserAgent is the User-Agent of this underlying library package. + ourUserAgent = "xenolf-acme" +) + +// httpHead performs a HEAD request with a proper User-Agent string. +// The response body (resp.Body) is already closed when this function returns. +func httpHead(url string) (resp *http.Response, err error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + + req.Header.Set("User-Agent", userAgent()) + + resp, err = defaultClient.Do(req) + if err != nil { + return resp, err + } + resp.Body.Close() + return resp, err +} + +// httpPost performs a POST request with a proper User-Agent string. +// Callers should close resp.Body when done reading from it. +func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + req.Header.Set("User-Agent", userAgent()) + + return defaultClient.Do(req) +} + +// httpGet performs a GET request with a proper User-Agent string. +// Callers should close resp.Body when done reading from it. 
+func httpGet(url string) (resp *http.Response, err error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", userAgent()) + + return defaultClient.Do(req) +} + +// getJSON performs an HTTP GET request and parses the response body +// as JSON, into the provided respBody object. +func getJSON(uri string, respBody interface{}) (http.Header, error) { + resp, err := httpGet(uri) + if err != nil { + return nil, fmt.Errorf("failed to get %q: %v", uri, err) + } + defer resp.Body.Close() + + if resp.StatusCode >= http.StatusBadRequest { + return resp.Header, handleHTTPError(resp) + } + + return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) +} + +// postJSON performs an HTTP POST request and parses the response body +// as JSON, into the provided respBody object. +func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) { + jsonBytes, err := json.Marshal(reqBody) + if err != nil { + return nil, errors.New("Failed to marshal network message...") + } + + resp, err := j.post(uri, jsonBytes) + if err != nil { + return nil, fmt.Errorf("Failed to post JWS message. -> %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode >= http.StatusBadRequest { + return resp.Header, handleHTTPError(resp) + } + + if respBody == nil { + return resp.Header, nil + } + + return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) +} + +// userAgent builds and returns the User-Agent string to use in requests. +func userAgent() string { + ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent) + return strings.TrimSpace(ua) +} diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/github.com/xenolf/lego/acme/http_challenge.go new file mode 100644 index 000000000000..95cb1fd816bc --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/http_challenge.go @@ -0,0 +1,41 @@ +package acme + +import ( + "fmt" + "log" +) + +type httpChallenge struct { + jws *jws + validate validateFunc + provider ChallengeProvider +} + +// HTTP01ChallengePath returns the URL path for the `http-01` challenge +func HTTP01ChallengePath(token string) string { + return "/.well-known/acme-challenge/" + token +} + +func (s *httpChallenge) Solve(chlng challenge, domain string) error { + + logf("[INFO][%s] acme: Trying to solve HTTP-01", domain) + + // Generate the Key Authorization for the challenge + keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) + if err != nil { + return err + } + + err = s.provider.Present(domain, chlng.Token, keyAuth) + if err != nil { + return fmt.Errorf("[%s] error presenting token: %v", domain, err) + } + defer func() { + err := s.provider.CleanUp(domain, chlng.Token, keyAuth) + if err != nil { + log.Printf("[%s] error cleaning up: %v", domain, err) + } + }() + + return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) +} diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/github.com/xenolf/lego/acme/http_challenge_server.go new file mode 100644 index 000000000000..42541380cb41 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/http_challenge_server.go @@ -0,0 +1,79 @@ +package acme + +import ( + "fmt" + "net" + "net/http" + "strings" +) + +// HTTPProviderServer implements ChallengeProvider for `http-01` challenge +// It may be instantiated without using the NewHTTPProviderServer function if +// you want 
only to use the default values. +type HTTPProviderServer struct { + iface string + port string + done chan bool + listener net.Listener +} + +// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port. +// Setting iface and / or port to an empty string will make the server fall back to +// the "any" interface and port 80 respectively. +func NewHTTPProviderServer(iface, port string) *HTTPProviderServer { + return &HTTPProviderServer{iface: iface, port: port} +} + +// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests. +func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error { + if s.port == "" { + s.port = "80" + } + + var err error + s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port)) + if err != nil { + return fmt.Errorf("Could not start HTTP server for challenge -> %v", err) + } + + s.done = make(chan bool) + go s.serve(domain, token, keyAuth) + return nil +} + +// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)` +func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error { + if s.listener == nil { + return nil + } + s.listener.Close() + <-s.done + return nil +} + +func (s *HTTPProviderServer) serve(domain, token, keyAuth string) { + path := HTTP01ChallengePath(token) + + // The handler validates the HOST header and request type. + // For validation it then writes the token the server returned with the challenge + mux := http.NewServeMux() + mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.Host, domain) && r.Method == "GET" { + w.Header().Add("Content-Type", "text/plain") + w.Write([]byte(keyAuth)) + logf("[INFO][%s] Served key authentication", domain) + } else { + logf("[INFO] Received request for domain %s with method %s", r.Host, r.Method) + w.Write([]byte("TEST")) + } + }) + + httpServer := &http.Server{ + Handler: mux, + } + // Once httpServer is shut down we don't want any lingering + // connections, so disable KeepAlives. 
+ httpServer.SetKeepAlivesEnabled(false) + httpServer.Serve(s.listener) + s.done <- true +} diff --git a/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/xenolf/lego/acme/jws.go new file mode 100644 index 000000000000..e000bc645b10 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/jws.go @@ -0,0 +1,109 @@ +package acme + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "fmt" + "net/http" + + "gopkg.in/square/go-jose.v1" +) + +type jws struct { + directoryURL string + privKey crypto.PrivateKey + nonces []string +} + +func keyAsJWK(key interface{}) *jose.JsonWebKey { + switch k := key.(type) { + case *ecdsa.PublicKey: + return &jose.JsonWebKey{Key: k, Algorithm: "EC"} + case *rsa.PublicKey: + return &jose.JsonWebKey{Key: k, Algorithm: "RSA"} + + default: + return nil + } +} + +// Posts a JWS signed message to the specified URL +func (j *jws) post(url string, content []byte) (*http.Response, error) { + signedContent, err := j.signContent(content) + if err != nil { + return nil, err + } + + resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize()))) + if err != nil { + return nil, err + } + + j.getNonceFromResponse(resp) + + return resp, err +} + +func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) { + + var alg jose.SignatureAlgorithm + switch k := j.privKey.(type) { + case *rsa.PrivateKey: + alg = jose.RS256 + case *ecdsa.PrivateKey: + if k.Curve == elliptic.P256() { + alg = jose.ES256 + } else if k.Curve == elliptic.P384() { + alg = jose.ES384 + } + } + + signer, err := jose.NewSigner(alg, j.privKey) + if err != nil { + return nil, err + } + signer.SetNonceSource(j) + + signed, err := signer.Sign(content) + if err != nil { + return nil, err + } + return signed, nil +} + +func (j *jws) getNonceFromResponse(resp *http.Response) error { + nonce := resp.Header.Get("Replay-Nonce") + if nonce == "" { + return fmt.Errorf("Server did not respond with a proper nonce header.") + } + + j.nonces = append(j.nonces, nonce) + return nil +} + +func (j *jws) getNonce() error { + resp, err := httpHead(j.directoryURL) + if err != nil { + return err + } + + return j.getNonceFromResponse(resp) +} + +func (j *jws) Nonce() (string, error) { + nonce := "" + if len(j.nonces) == 0 { + err := j.getNonce() + if err != nil { + return nonce, err + } + } + if len(j.nonces) == 0 { + return "", fmt.Errorf("Can't get nonce") + } + nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1] + return nonce, nil +} diff --git a/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/xenolf/lego/acme/messages.go new file mode 100644 index 000000000000..0efeae67479e --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/messages.go @@ -0,0 +1,117 @@ +package acme + +import ( + "time" + + "gopkg.in/square/go-jose.v1" +) + +type directory struct { + NewAuthzURL string `json:"new-authz"` + NewCertURL string `json:"new-cert"` + NewRegURL string `json:"new-reg"` + RevokeCertURL string `json:"revoke-cert"` +} + +type recoveryKeyMessage struct { + Length int `json:"length,omitempty"` + Client jose.JsonWebKey `json:"client,omitempty"` + Server jose.JsonWebKey `json:"client,omitempty"` +} + +type registrationMessage struct { + Resource string `json:"resource"` + Contact []string `json:"contact"` + Delete bool `json:"delete,omitempty"` + // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"` +} + +// Registration is returned by the ACME server after the registration +// The client 
implementation should save this registration somewhere. +type Registration struct { + Resource string `json:"resource,omitempty"` + ID int `json:"id"` + Key jose.JsonWebKey `json:"key"` + Contact []string `json:"contact"` + Agreement string `json:"agreement,omitempty"` + Authorizations string `json:"authorizations,omitempty"` + Certificates string `json:"certificates,omitempty"` + // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"` +} + +// RegistrationResource represents all important informations about a registration +// of which the client needs to keep track itself. +type RegistrationResource struct { + Body Registration `json:"body,omitempty"` + URI string `json:"uri,omitempty"` + NewAuthzURL string `json:"new_authzr_uri,omitempty"` + TosURL string `json:"terms_of_service,omitempty"` +} + +type authorizationResource struct { + Body authorization + Domain string + NewCertURL string + AuthURL string +} + +type authorization struct { + Resource string `json:"resource,omitempty"` + Identifier identifier `json:"identifier"` + Status string `json:"status,omitempty"` + Expires time.Time `json:"expires,omitempty"` + Challenges []challenge `json:"challenges,omitempty"` + Combinations [][]int `json:"combinations,omitempty"` +} + +type identifier struct { + Type string `json:"type"` + Value string `json:"value"` +} + +type validationRecord struct { + URI string `json:"url,omitempty"` + Hostname string `json:"hostname,omitempty"` + Port string `json:"port,omitempty"` + ResolvedAddresses []string `json:"addressesResolved,omitempty"` + UsedAddress string `json:"addressUsed,omitempty"` +} + +type challenge struct { + Resource string `json:"resource,omitempty"` + Type Challenge `json:"type,omitempty"` + Status string `json:"status,omitempty"` + URI string `json:"uri,omitempty"` + Token string `json:"token,omitempty"` + KeyAuthorization string `json:"keyAuthorization,omitempty"` + TLS bool `json:"tls,omitempty"` + Iterations int `json:"n,omitempty"` + Error RemoteError `json:"error,omitempty"` + ValidationRecords []validationRecord `json:"validationRecord,omitempty"` +} + +type csrMessage struct { + Resource string `json:"resource,omitempty"` + Csr string `json:"csr"` + Authorizations []string `json:"authorizations"` +} + +type revokeCertMessage struct { + Resource string `json:"resource"` + Certificate string `json:"certificate"` +} + +// CertificateResource represents a CA issued certificate. +// PrivateKey and Certificate are both already PEM encoded +// and can be directly written to disk. Certificate may +// be a certificate bundle, depending on the options supplied +// to create it. +type CertificateResource struct { + Domain string `json:"domain"` + CertURL string `json:"certUrl"` + CertStableURL string `json:"certStableUrl"` + AccountRef string `json:"accountRef,omitempty"` + PrivateKey []byte `json:"-"` + Certificate []byte `json:"-"` + CSR []byte `json:"-"` +} diff --git a/vendor/github.com/xenolf/lego/acme/pop_challenge.go b/vendor/github.com/xenolf/lego/acme/pop_challenge.go new file mode 100644 index 000000000000..8d2a213b01d7 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/pop_challenge.go @@ -0,0 +1 @@ +package acme diff --git a/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/github.com/xenolf/lego/acme/provider.go new file mode 100644 index 000000000000..d177ff07a20e --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/provider.go @@ -0,0 +1,28 @@ +package acme + +import "time" + +// ChallengeProvider enables implementing a custom challenge +// provider. 
Present presents the solution to a challenge available to +// be solved. CleanUp will be called by the challenge if Present ends +// in a non-error state. +type ChallengeProvider interface { + Present(domain, token, keyAuth string) error + CleanUp(domain, token, keyAuth string) error +} + +// ChallengeProviderTimeout allows for implementing a +// ChallengeProvider where an unusually long timeout is required when +// waiting for an ACME challenge to be satisfied, such as when +// checking for DNS record progagation. If an implementor of a +// ChallengeProvider provides a Timeout method, then the return values +// of the Timeout method will be used when appropriate by the acme +// package. The interval value is the time between checks. +// +// The default values used for timeout and interval are 60 seconds and +// 2 seconds respectively. These are used when no Timeout method is +// defined for the ChallengeProvider. +type ChallengeProviderTimeout interface { + ChallengeProvider + Timeout() (timeout, interval time.Duration) +} diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go new file mode 100644 index 000000000000..34383cbfaf41 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go @@ -0,0 +1,67 @@ +package acme + +import ( + "crypto/rsa" + "crypto/sha256" + "crypto/tls" + "encoding/hex" + "fmt" + "log" +) + +type tlsSNIChallenge struct { + jws *jws + validate validateFunc + provider ChallengeProvider +} + +func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error { + // FIXME: https://github.com/ietf-wg-acme/acme/pull/22 + // Currently we implement this challenge to track boulder, not the current spec! + + logf("[INFO][%s] acme: Trying to solve TLS-SNI-01", domain) + + // Generate the Key Authorization for the challenge + keyAuth, err := getKeyAuthorization(chlng.Token, t.jws.privKey) + if err != nil { + return err + } + + err = t.provider.Present(domain, chlng.Token, keyAuth) + if err != nil { + return fmt.Errorf("[%s] error presenting token: %v", domain, err) + } + defer func() { + err := t.provider.CleanUp(domain, chlng.Token, keyAuth) + if err != nil { + log.Printf("[%s] error cleaning up: %v", domain, err) + } + }() + return t.validate(t.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) +} + +// TLSSNI01ChallengeCert returns a certificate and target domain for the `tls-sni-01` challenge +func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, string, error) { + // generate a new RSA key for the certificates + tempPrivKey, err := generatePrivateKey(RSA2048) + if err != nil { + return tls.Certificate{}, "", err + } + rsaPrivKey := tempPrivKey.(*rsa.PrivateKey) + rsaPrivPEM := pemEncode(rsaPrivKey) + + zBytes := sha256.Sum256([]byte(keyAuth)) + z := hex.EncodeToString(zBytes[:sha256.Size]) + domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) + tempCertPEM, err := generatePemCert(rsaPrivKey, domain) + if err != nil { + return tls.Certificate{}, "", err + } + + certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM) + if err != nil { + return tls.Certificate{}, "", err + } + + return certificate, domain, nil +} diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go new file mode 100644 index 000000000000..df00fbb5aefc --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go @@ -0,0 
+1,62 @@ +package acme + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" +) + +// TLSProviderServer implements ChallengeProvider for `TLS-SNI-01` challenge +// It may be instantiated without using the NewTLSProviderServer function if +// you want only to use the default values. +type TLSProviderServer struct { + iface string + port string + done chan bool + listener net.Listener +} + +// NewTLSProviderServer creates a new TLSProviderServer on the selected interface and port. +// Setting iface and / or port to an empty string will make the server fall back to +// the "any" interface and port 443 respectively. +func NewTLSProviderServer(iface, port string) *TLSProviderServer { + return &TLSProviderServer{iface: iface, port: port} +} + +// Present makes the keyAuth available as a cert +func (s *TLSProviderServer) Present(domain, token, keyAuth string) error { + if s.port == "" { + s.port = "443" + } + + cert, _, err := TLSSNI01ChallengeCert(keyAuth) + if err != nil { + return err + } + + tlsConf := new(tls.Config) + tlsConf.Certificates = []tls.Certificate{cert} + + s.listener, err = tls.Listen("tcp", net.JoinHostPort(s.iface, s.port), tlsConf) + if err != nil { + return fmt.Errorf("Could not start HTTPS server for challenge -> %v", err) + } + + s.done = make(chan bool) + go func() { + http.Serve(s.listener, nil) + s.done <- true + }() + return nil +} + +// CleanUp closes the HTTP server. +func (s *TLSProviderServer) CleanUp(domain, token, keyAuth string) error { + if s.listener == nil { + return nil + } + s.listener.Close() + <-s.done + return nil +} diff --git a/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/github.com/xenolf/lego/acme/utils.go new file mode 100644 index 000000000000..2fa0db3040ed --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/utils.go @@ -0,0 +1,29 @@ +package acme + +import ( + "fmt" + "time" +) + +// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'. +func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error { + var lastErr string + timeup := time.After(timeout) + for { + select { + case <-timeup: + return fmt.Errorf("Time limit exceeded. Last error: %s", lastErr) + default: + } + + stop, err := f() + if stop { + return nil + } + if err != nil { + lastErr = err.Error() + } + + time.Sleep(interval) + } +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go b/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go new file mode 100644 index 000000000000..3f869c106d48 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/cloudflare/cloudflare.go @@ -0,0 +1,219 @@ +// Package cloudflare implements a DNS provider for solving the DNS-01 +// challenge using cloudflare DNS. +package cloudflare + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/xenolf/lego/acme" +) + +// CloudFlareAPIURL represents the API endpoint to call. +// TODO: Unexport? +const CloudFlareAPIURL = "https://api.cloudflare.com/client/v4" + +// DNSProvider is an implementation of the acme.ChallengeProvider interface +type DNSProvider struct { + authEmail string + authKey string +} + +// NewDNSProvider returns a DNSProvider instance configured for cloudflare. +// Credentials must be passed in the environment variables: CLOUDFLARE_EMAIL +// and CLOUDFLARE_API_KEY. 
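As an illustration of how this provider is typically constructed, here is a minimal sketch (not part of the vendored source; the credential values are placeholders read from the environment):

package main

import (
	"log"
	"os"

	"github.com/xenolf/lego/providers/dns/cloudflare"
)

func main() {
	// Explicit credentials; alternatively export CLOUDFLARE_EMAIL and
	// CLOUDFLARE_API_KEY and call cloudflare.NewDNSProvider() instead.
	provider, err := cloudflare.NewDNSProviderCredentials(
		os.Getenv("CLOUDFLARE_EMAIL"),
		os.Getenv("CLOUDFLARE_API_KEY"),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = provider // satisfies acme.ChallengeProvider and acme.ChallengeProviderTimeout
}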
+func NewDNSProvider() (*DNSProvider, error) { + email := os.Getenv("CLOUDFLARE_EMAIL") + key := os.Getenv("CLOUDFLARE_API_KEY") + return NewDNSProviderCredentials(email, key) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for cloudflare. +func NewDNSProviderCredentials(email, key string) (*DNSProvider, error) { + if email == "" || key == "" { + return nil, fmt.Errorf("CloudFlare credentials missing") + } + + return &DNSProvider{ + authEmail: email, + authKey: key, + }, nil +} + +// Timeout returns the timeout and interval to use when checking for DNS +// propagation. Adjusting here to cope with spikes in propagation times. +func (c *DNSProvider) Timeout() (timeout, interval time.Duration) { + return 120 * time.Second, 2 * time.Second +} + +// Present creates a TXT record to fulfil the dns-01 challenge +func (c *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, _ := acme.DNS01Record(domain, keyAuth) + zoneID, err := c.getHostedZoneID(fqdn) + if err != nil { + return err + } + + rec := cloudFlareRecord{ + Type: "TXT", + Name: acme.UnFqdn(fqdn), + Content: value, + TTL: 120, + } + + body, err := json.Marshal(rec) + if err != nil { + return err + } + + _, err = c.makeRequest("POST", fmt.Sprintf("/zones/%s/dns_records", zoneID), bytes.NewReader(body)) + if err != nil { + return err + } + + return nil +} + +// CleanUp removes the TXT record matching the specified parameters +func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) + + record, err := c.findTxtRecord(fqdn) + if err != nil { + return err + } + + _, err = c.makeRequest("DELETE", fmt.Sprintf("/zones/%s/dns_records/%s", record.ZoneID, record.ID), nil) + if err != nil { + return err + } + + return nil +} + +func (c *DNSProvider) getHostedZoneID(fqdn string) (string, error) { + // HostedZone represents a CloudFlare DNS zone + type HostedZone struct { + ID string `json:"id"` + Name string `json:"name"` + } + + authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) + if err != nil { + return "", err + } + + result, err := c.makeRequest("GET", "/zones?name="+acme.UnFqdn(authZone), nil) + if err != nil { + return "", err + } + + var hostedZone []HostedZone + err = json.Unmarshal(result, &hostedZone) + if err != nil { + return "", err + } + + if len(hostedZone) != 1 { + return "", fmt.Errorf("Zone %s not found in CloudFlare for domain %s", authZone, fqdn) + } + + return hostedZone[0].ID, nil +} + +func (c *DNSProvider) findTxtRecord(fqdn string) (*cloudFlareRecord, error) { + zoneID, err := c.getHostedZoneID(fqdn) + if err != nil { + return nil, err + } + + result, err := c.makeRequest("GET", fmt.Sprintf("/zones/%s/dns_records?per_page=1000", zoneID), nil) + if err != nil { + return nil, err + } + + var records []cloudFlareRecord + err = json.Unmarshal(result, &records) + if err != nil { + return nil, err + } + + for _, rec := range records { + if rec.Name == acme.UnFqdn(fqdn) && rec.Type == "TXT" { + return &rec, nil + } + } + + return nil, fmt.Errorf("No existing record found for %s", fqdn) +} + +func (c *DNSProvider) makeRequest(method, uri string, body io.Reader) (json.RawMessage, error) { + // APIError contains error details for failed requests + type APIError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` + ErrorChain []APIError `json:"error_chain,omitempty"` + } + + // APIResponse represents a response from CloudFlare API + type 
APIResponse struct { + Success bool `json:"success"` + Errors []*APIError `json:"errors"` + Result json.RawMessage `json:"result"` + } + + req, err := http.NewRequest(method, fmt.Sprintf("%s%s", CloudFlareAPIURL, uri), body) + if err != nil { + return nil, err + } + + req.Header.Set("X-Auth-Email", c.authEmail) + req.Header.Set("X-Auth-Key", c.authKey) + //req.Header.Set("User-Agent", userAgent()) + + client := http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Error querying Cloudflare API -> %v", err) + } + + defer resp.Body.Close() + + var r APIResponse + err = json.NewDecoder(resp.Body).Decode(&r) + if err != nil { + return nil, err + } + + if !r.Success { + if len(r.Errors) > 0 { + errStr := "" + for _, apiErr := range r.Errors { + errStr += fmt.Sprintf("\t Error: %d: %s", apiErr.Code, apiErr.Message) + for _, chainErr := range apiErr.ErrorChain { + errStr += fmt.Sprintf("<- %d: %s", chainErr.Code, chainErr.Message) + } + } + return nil, fmt.Errorf("Cloudflare API Error \n%s", errStr) + } + return nil, fmt.Errorf("Cloudflare API error") + } + + return r.Result, nil +} + +// cloudFlareRecord represents a CloudFlare DNS record +type cloudFlareRecord struct { + Name string `json:"name"` + Type string `json:"type"` + Content string `json:"content"` + ID string `json:"id,omitempty"` + TTL int `json:"ttl,omitempty"` + ZoneID string `json:"zone_id,omitempty"` +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go b/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go new file mode 100644 index 000000000000..da261b39ada6 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/digitalocean/digitalocean.go @@ -0,0 +1,166 @@ +// Package digitalocean implements a DNS provider for solving the DNS-01 +// challenge using digitalocean DNS. +package digitalocean + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "os" + "sync" + "time" + + "github.com/xenolf/lego/acme" +) + +// DNSProvider is an implementation of the acme.ChallengeProvider interface +// that uses DigitalOcean's REST API to manage TXT records for a domain. +type DNSProvider struct { + apiAuthToken string + recordIDs map[string]int + recordIDsMu sync.Mutex +} + +// NewDNSProvider returns a DNSProvider instance configured for Digital +// Ocean. Credentials must be passed in the environment variable: +// DO_AUTH_TOKEN. +func NewDNSProvider() (*DNSProvider, error) { + apiAuthToken := os.Getenv("DO_AUTH_TOKEN") + return NewDNSProviderCredentials(apiAuthToken) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for Digital Ocean. 
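A minimal construction sketch for this provider (assumed usage, not from the patch; the token value is a placeholder):

package main

import (
	"log"

	"github.com/xenolf/lego/providers/dns/digitalocean"
)

func main() {
	provider, err := digitalocean.NewDNSProviderCredentials("example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}
	// Present will create the TXT record and cache its record ID so that
	// CleanUp can later delete exactly that record.
	_ = provider
}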
+func NewDNSProviderCredentials(apiAuthToken string) (*DNSProvider, error) { + if apiAuthToken == "" { + return nil, fmt.Errorf("DigitalOcean credentials missing") + } + return &DNSProvider{ + apiAuthToken: apiAuthToken, + recordIDs: make(map[string]int), + }, nil +} + +// Present creates a TXT record using the specified parameters +func (d *DNSProvider) Present(domain, token, keyAuth string) error { + // txtRecordRequest represents the request body to DO's API to make a TXT record + type txtRecordRequest struct { + RecordType string `json:"type"` + Name string `json:"name"` + Data string `json:"data"` + } + + // txtRecordResponse represents a response from DO's API after making a TXT record + type txtRecordResponse struct { + DomainRecord struct { + ID int `json:"id"` + Type string `json:"type"` + Name string `json:"name"` + Data string `json:"data"` + } `json:"domain_record"` + } + + fqdn, value, _ := acme.DNS01Record(domain, keyAuth) + + authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) + if err != nil { + return fmt.Errorf("Could not determine zone for domain: '%s'. %s", domain, err) + } + + authZone = acme.UnFqdn(authZone) + + reqURL := fmt.Sprintf("%s/v2/domains/%s/records", digitalOceanBaseURL, authZone) + reqData := txtRecordRequest{RecordType: "TXT", Name: fqdn, Data: value} + body, err := json.Marshal(reqData) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", reqURL, bytes.NewReader(body)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.apiAuthToken)) + + client := http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + var errInfo digitalOceanAPIError + json.NewDecoder(resp.Body).Decode(&errInfo) + return fmt.Errorf("HTTP %d: %s: %s", resp.StatusCode, errInfo.ID, errInfo.Message) + } + + // Everything looks good; but we'll need the ID later to delete the record + var respData txtRecordResponse + err = json.NewDecoder(resp.Body).Decode(&respData) + if err != nil { + return err + } + d.recordIDsMu.Lock() + d.recordIDs[fqdn] = respData.DomainRecord.ID + d.recordIDsMu.Unlock() + + return nil +} + +// CleanUp removes the TXT record matching the specified parameters +func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) + + // get the record's unique ID from when we created it + d.recordIDsMu.Lock() + recordID, ok := d.recordIDs[fqdn] + d.recordIDsMu.Unlock() + if !ok { + return fmt.Errorf("unknown record ID for '%s'", fqdn) + } + + authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) + if err != nil { + return fmt.Errorf("Could not determine zone for domain: '%s'. 
%s", domain, err) + } + + authZone = acme.UnFqdn(authZone) + + reqURL := fmt.Sprintf("%s/v2/domains/%s/records/%d", digitalOceanBaseURL, authZone, recordID) + req, err := http.NewRequest("DELETE", reqURL, nil) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.apiAuthToken)) + + client := http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + var errInfo digitalOceanAPIError + json.NewDecoder(resp.Body).Decode(&errInfo) + return fmt.Errorf("HTTP %d: %s: %s", resp.StatusCode, errInfo.ID, errInfo.Message) + } + + // Delete record ID from map + d.recordIDsMu.Lock() + delete(d.recordIDs, fqdn) + d.recordIDsMu.Unlock() + + return nil +} + +type digitalOceanAPIError struct { + ID string `json:"id"` + Message string `json:"message"` +} + +var digitalOceanBaseURL = "https://api.digitalocean.com" diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go b/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go new file mode 100644 index 000000000000..c903a35ce832 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/dnsimple/dnsimple.go @@ -0,0 +1,141 @@ +// Package dnsimple implements a DNS provider for solving the DNS-01 challenge +// using dnsimple DNS. +package dnsimple + +import ( + "fmt" + "os" + "strings" + + "github.com/weppos/dnsimple-go/dnsimple" + "github.com/xenolf/lego/acme" +) + +// DNSProvider is an implementation of the acme.ChallengeProvider interface. +type DNSProvider struct { + client *dnsimple.Client +} + +// NewDNSProvider returns a DNSProvider instance configured for dnsimple. +// Credentials must be passed in the environment variables: DNSIMPLE_EMAIL +// and DNSIMPLE_API_KEY. +func NewDNSProvider() (*DNSProvider, error) { + email := os.Getenv("DNSIMPLE_EMAIL") + key := os.Getenv("DNSIMPLE_API_KEY") + return NewDNSProviderCredentials(email, key) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for dnsimple. +func NewDNSProviderCredentials(email, key string) (*DNSProvider, error) { + if email == "" || key == "" { + return nil, fmt.Errorf("DNSimple credentials missing") + } + + return &DNSProvider{ + client: dnsimple.NewClient(key, email), + }, nil +} + +// Present creates a TXT record to fulfil the dns-01 challenge. +func (c *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + + zoneID, zoneName, err := c.getHostedZone(domain) + if err != nil { + return err + } + + recordAttributes := c.newTxtRecord(zoneName, fqdn, value, ttl) + _, _, err = c.client.Domains.CreateRecord(zoneID, *recordAttributes) + if err != nil { + return fmt.Errorf("DNSimple API call failed: %v", err) + } + + return nil +} + +// CleanUp removes the TXT record matching the specified parameters. 
+func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) + + records, err := c.findTxtRecords(domain, fqdn) + if err != nil { + return err + } + + for _, rec := range records { + _, err := c.client.Domains.DeleteRecord(rec.DomainId, rec.Id) + if err != nil { + return err + } + } + return nil +} + +func (c *DNSProvider) getHostedZone(domain string) (string, string, error) { + zones, _, err := c.client.Domains.List() + if err != nil { + return "", "", fmt.Errorf("DNSimple API call failed: %v", err) + } + + authZone, err := acme.FindZoneByFqdn(acme.ToFqdn(domain), acme.RecursiveNameservers) + if err != nil { + return "", "", err + } + + var hostedZone dnsimple.Domain + for _, zone := range zones { + if zone.Name == acme.UnFqdn(authZone) { + hostedZone = zone + } + } + + if hostedZone.Id == 0 { + return "", "", fmt.Errorf("Zone %s not found in DNSimple for domain %s", authZone, domain) + + } + + return fmt.Sprintf("%v", hostedZone.Id), hostedZone.Name, nil +} + +func (c *DNSProvider) findTxtRecords(domain, fqdn string) ([]dnsimple.Record, error) { + zoneID, zoneName, err := c.getHostedZone(domain) + if err != nil { + return nil, err + } + + var records []dnsimple.Record + result, _, err := c.client.Domains.ListRecords(zoneID, "", "TXT") + if err != nil { + return records, fmt.Errorf("DNSimple API call has failed: %v", err) + } + + recordName := c.extractRecordName(fqdn, zoneName) + for _, record := range result { + if record.Name == recordName { + records = append(records, record) + } + } + + return records, nil +} + +func (c *DNSProvider) newTxtRecord(zone, fqdn, value string, ttl int) *dnsimple.Record { + name := c.extractRecordName(fqdn, zone) + + return &dnsimple.Record{ + Type: "TXT", + Name: name, + Content: value, + TTL: ttl, + } +} + +func (c *DNSProvider) extractRecordName(fqdn, domain string) string { + name := acme.UnFqdn(fqdn) + if idx := strings.Index(name, "."+domain); idx != -1 { + return name[:idx] + } + return name +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go b/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go new file mode 100644 index 000000000000..384bc850cf59 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/dyn/dyn.go @@ -0,0 +1,274 @@ +// Package dyn implements a DNS provider for solving the DNS-01 challenge +// using Dyn Managed DNS. +package dyn + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "os" + "strconv" + "time" + + "github.com/xenolf/lego/acme" +) + +var dynBaseURL = "https://api.dynect.net/REST" + +type dynResponse struct { + // One of 'success', 'failure', or 'incomplete' + Status string `json:"status"` + + // The structure containing the actual results of the request + Data json.RawMessage `json:"data"` + + // The ID of the job that was created in response to a request. + JobID int `json:"job_id"` + + // A list of zero or more messages + Messages json.RawMessage `json:"msgs"` +} + +// DNSProvider is an implementation of the acme.ChallengeProvider interface that uses +// Dyn's Managed DNS API to manage TXT records for a domain. +type DNSProvider struct { + customerName string + userName string + password string + token string +} + +// NewDNSProvider returns a DNSProvider instance configured for Dyn DNS. +// Credentials must be passed in the environment variables: DYN_CUSTOMER_NAME, +// DYN_USER_NAME and DYN_PASSWORD. 
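A minimal construction sketch (assumed usage; all three credential values are placeholders):

package main

import (
	"log"

	"github.com/xenolf/lego/providers/dns/dyn"
)

func main() {
	provider, err := dyn.NewDNSProviderCredentials("examplecorp", "apiuser", "s3cret") // placeholders
	if err != nil {
		log.Fatal(err)
	}
	_ = provider
}

Each Present and CleanUp call below then opens its own API session (login), applies the record change, publishes the zone, and logs out again.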
+func NewDNSProvider() (*DNSProvider, error) { + customerName := os.Getenv("DYN_CUSTOMER_NAME") + userName := os.Getenv("DYN_USER_NAME") + password := os.Getenv("DYN_PASSWORD") + return NewDNSProviderCredentials(customerName, userName, password) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for Dyn DNS. +func NewDNSProviderCredentials(customerName, userName, password string) (*DNSProvider, error) { + if customerName == "" || userName == "" || password == "" { + return nil, fmt.Errorf("DynDNS credentials missing") + } + + return &DNSProvider{ + customerName: customerName, + userName: userName, + password: password, + }, nil +} + +func (d *DNSProvider) sendRequest(method, resource string, payload interface{}) (*dynResponse, error) { + url := fmt.Sprintf("%s/%s", dynBaseURL, resource) + + body, err := json.Marshal(payload) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(method, url, bytes.NewReader(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + if len(d.token) > 0 { + req.Header.Set("Auth-Token", d.token) + } + + client := &http.Client{Timeout: time.Duration(10 * time.Second)} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Dyn API request failed with HTTP status code %d", resp.StatusCode) + } else if resp.StatusCode == 307 { + // TODO add support for HTTP 307 response and long running jobs + return nil, fmt.Errorf("Dyn API request returned HTTP 307. This is currently unsupported") + } + + var dynRes dynResponse + err = json.NewDecoder(resp.Body).Decode(&dynRes) + if err != nil { + return nil, err + } + + if dynRes.Status == "failure" { + // TODO add better error handling + return nil, fmt.Errorf("Dyn API request failed: %s", dynRes.Messages) + } + + return &dynRes, nil +} + +// Starts a new Dyn API Session. Authenticates using customerName, userName, +// password and receives a token to be used in for subsequent requests. 
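Roughly, the exchange implemented below amounts to the following (a sketch of the request and response shapes; all values are placeholders):

// POST https://api.dynect.net/REST/Session
//   {"customer_name": "examplecorp", "user_name": "apiuser", "password": "..."}
// -> {"status": "success", "data": {"token": "abc123", "version": "..."}}
//
// Subsequent requests then send the token in the Auth-Token header,
// and logout issues DELETE /REST/Session with that same header.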
+func (d *DNSProvider) login() error { + type creds struct { + Customer string `json:"customer_name"` + User string `json:"user_name"` + Pass string `json:"password"` + } + + type session struct { + Token string `json:"token"` + Version string `json:"version"` + } + + payload := &creds{Customer: d.customerName, User: d.userName, Pass: d.password} + dynRes, err := d.sendRequest("POST", "Session", payload) + if err != nil { + return err + } + + var s session + err = json.Unmarshal(dynRes.Data, &s) + if err != nil { + return err + } + + d.token = s.Token + + return nil +} + +// Destroys Dyn Session +func (d *DNSProvider) logout() error { + if len(d.token) == 0 { + // nothing to do + return nil + } + + url := fmt.Sprintf("%s/Session", dynBaseURL) + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Auth-Token", d.token) + + client := &http.Client{Timeout: time.Duration(10 * time.Second)} + resp, err := client.Do(req) + if err != nil { + return err + } + resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("Dyn API request failed to delete session with HTTP status code %d", resp.StatusCode) + } + + d.token = "" + + return nil +} + +// Present creates a TXT record using the specified parameters +func (d *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + + authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) + if err != nil { + return err + } + + err = d.login() + if err != nil { + return err + } + + data := map[string]interface{}{ + "rdata": map[string]string{ + "txtdata": value, + }, + "ttl": strconv.Itoa(ttl), + } + + resource := fmt.Sprintf("TXTRecord/%s/%s/", authZone, fqdn) + _, err = d.sendRequest("POST", resource, data) + if err != nil { + return err + } + + err = d.publish(authZone, "Added TXT record for ACME dns-01 challenge using lego client") + if err != nil { + return err + } + + err = d.logout() + if err != nil { + return err + } + + return nil +} + +func (d *DNSProvider) publish(zone, notes string) error { + type publish struct { + Publish bool `json:"publish"` + Notes string `json:"notes"` + } + + pub := &publish{Publish: true, Notes: notes} + resource := fmt.Sprintf("Zone/%s/", zone) + _, err := d.sendRequest("PUT", resource, pub) + if err != nil { + return err + } + + return nil +} + +// CleanUp removes the TXT record matching the specified parameters +func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) + + authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) + if err != nil { + return err + } + + err = d.login() + if err != nil { + return err + } + + resource := fmt.Sprintf("TXTRecord/%s/%s/", authZone, fqdn) + url := fmt.Sprintf("%s/%s", dynBaseURL, resource) + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Auth-Token", d.token) + + client := &http.Client{Timeout: time.Duration(10 * time.Second)} + resp, err := client.Do(req) + if err != nil { + return err + } + resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("Dyn API request failed to delete TXT record HTTP status code %d", resp.StatusCode) + } + + err = d.publish(authZone, "Removed TXT record for ACME dns-01 challenge using lego client") + if err != nil { + return err + } + + err = d.logout() + if err != nil { + 
return err + } + + return nil +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go b/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go new file mode 100644 index 000000000000..422b02a21265 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/gandi/gandi.go @@ -0,0 +1,472 @@ +// Package gandi implements a DNS provider for solving the DNS-01 +// challenge using Gandi DNS. +package gandi + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "strings" + "sync" + "time" + + "github.com/xenolf/lego/acme" +) + +// Gandi API reference: http://doc.rpc.gandi.net/index.html +// Gandi API domain examples: http://doc.rpc.gandi.net/domain/faq.html + +var ( + // endpoint is the Gandi XML-RPC endpoint used by Present and + // CleanUp. It is overridden during tests. + endpoint = "https://rpc.gandi.net/xmlrpc/" + // findZoneByFqdn determines the DNS zone of an fqdn. It is overridden + // during tests. + findZoneByFqdn = acme.FindZoneByFqdn +) + +// inProgressInfo contains information about an in-progress challenge +type inProgressInfo struct { + zoneID int // zoneID of gandi zone to restore in CleanUp + newZoneID int // zoneID of temporary gandi zone containing TXT record + authZone string // the domain name registered at gandi with trailing "." +} + +// DNSProvider is an implementation of the +// acme.ChallengeProviderTimeout interface that uses Gandi's XML-RPC +// API to manage TXT records for a domain. +type DNSProvider struct { + apiKey string + inProgressFQDNs map[string]inProgressInfo + inProgressAuthZones map[string]struct{} + inProgressMu sync.Mutex +} + +// NewDNSProvider returns a DNSProvider instance configured for Gandi. +// Credentials must be passed in the environment variable: GANDI_API_KEY. +func NewDNSProvider() (*DNSProvider, error) { + apiKey := os.Getenv("GANDI_API_KEY") + return NewDNSProviderCredentials(apiKey) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for Gandi. +func NewDNSProviderCredentials(apiKey string) (*DNSProvider, error) { + if apiKey == "" { + return nil, fmt.Errorf("No Gandi API Key given") + } + return &DNSProvider{ + apiKey: apiKey, + inProgressFQDNs: make(map[string]inProgressInfo), + inProgressAuthZones: make(map[string]struct{}), + }, nil +} + +// Present creates a TXT record using the specified parameters. It +// does this by creating and activating a new temporary Gandi DNS +// zone. This new zone contains the TXT record. 
+func (d *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + if ttl < 300 { + ttl = 300 // 300 is gandi minimum value for ttl + } + // find authZone and Gandi zone_id for fqdn + authZone, err := findZoneByFqdn(fqdn, acme.RecursiveNameservers) + if err != nil { + return fmt.Errorf("Gandi DNS: findZoneByFqdn failure: %v", err) + } + zoneID, err := d.getZoneID(authZone) + if err != nil { + return err + } + // determine name of TXT record + if !strings.HasSuffix( + strings.ToLower(fqdn), strings.ToLower("."+authZone)) { + return fmt.Errorf( + "Gandi DNS: unexpected authZone %s for fqdn %s", authZone, fqdn) + } + name := fqdn[:len(fqdn)-len("."+authZone)] + // acquire lock and check there is not a challenge already in + // progress for this value of authZone + d.inProgressMu.Lock() + defer d.inProgressMu.Unlock() + if _, ok := d.inProgressAuthZones[authZone]; ok { + return fmt.Errorf( + "Gandi DNS: challenge already in progress for authZone %s", + authZone) + } + // perform API actions to create and activate new gandi zone + // containing the required TXT record + newZoneName := fmt.Sprintf( + "%s [ACME Challenge %s]", + acme.UnFqdn(authZone), time.Now().Format(time.RFC822Z)) + newZoneID, err := d.cloneZone(zoneID, newZoneName) + if err != nil { + return err + } + newZoneVersion, err := d.newZoneVersion(newZoneID) + if err != nil { + return err + } + err = d.addTXTRecord(newZoneID, newZoneVersion, name, value, ttl) + if err != nil { + return err + } + err = d.setZoneVersion(newZoneID, newZoneVersion) + if err != nil { + return err + } + err = d.setZone(authZone, newZoneID) + if err != nil { + return err + } + // save data necessary for CleanUp + d.inProgressFQDNs[fqdn] = inProgressInfo{ + zoneID: zoneID, + newZoneID: newZoneID, + authZone: authZone, + } + d.inProgressAuthZones[authZone] = struct{}{} + return nil +} + +// CleanUp removes the TXT record matching the specified +// parameters. It does this by restoring the old Gandi DNS zone and +// removing the temporary one created by Present. +func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) + // acquire lock and retrieve zoneID, newZoneID and authZone + d.inProgressMu.Lock() + defer d.inProgressMu.Unlock() + if _, ok := d.inProgressFQDNs[fqdn]; !ok { + // if there is no cleanup information then just return + return nil + } + zoneID := d.inProgressFQDNs[fqdn].zoneID + newZoneID := d.inProgressFQDNs[fqdn].newZoneID + authZone := d.inProgressFQDNs[fqdn].authZone + delete(d.inProgressFQDNs, fqdn) + delete(d.inProgressAuthZones, authZone) + // perform API actions to restore old gandi zone for authZone + err := d.setZone(authZone, zoneID) + if err != nil { + return err + } + err = d.deleteZone(newZoneID) + if err != nil { + return err + } + return nil +} + +// Timeout returns the values (40*time.Minute, 60*time.Second) which +// are used by the acme package as timeout and check interval values +// when checking for DNS record propagation with Gandi. 
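To show how these values are consumed, here is a simplified sketch of the polling the acme package performs; WaitFor and ChallengeProviderTimeout are defined in this patch, while the solver package name and the checkTXT probe are hypothetical:

package solver

import (
	"time"

	"github.com/xenolf/lego/acme"
)

// waitForPropagation applies a provider's Timeout override when present,
// otherwise falls back to the documented defaults of 60s / 2s.
func waitForPropagation(p acme.ChallengeProvider, checkTXT func() (bool, error)) error {
	timeout, interval := 60*time.Second, 2*time.Second // package defaults
	if pt, ok := p.(acme.ChallengeProviderTimeout); ok {
		timeout, interval = pt.Timeout() // e.g. 40m / 60s for Gandi
	}
	return acme.WaitFor(timeout, interval, checkTXT)
}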
+func (d *DNSProvider) Timeout() (timeout, interval time.Duration) {
+ return 40 * time.Minute, 60 * time.Second
+}
+
+// types for XML-RPC method calls and parameters
+
+type param interface {
+ param()
+}
+type paramString struct {
+ XMLName xml.Name `xml:"param"`
+ Value string `xml:"value>string"`
+}
+type paramInt struct {
+ XMLName xml.Name `xml:"param"`
+ Value int `xml:"value>int"`
+}
+
+type structMember interface {
+ structMember()
+}
+type structMemberString struct {
+ Name string `xml:"name"`
+ Value string `xml:"value>string"`
+}
+type structMemberInt struct {
+ Name string `xml:"name"`
+ Value int `xml:"value>int"`
+}
+type paramStruct struct {
+ XMLName xml.Name `xml:"param"`
+ StructMembers []structMember `xml:"value>struct>member"`
+}
+
+func (p paramString) param() {}
+func (p paramInt) param() {}
+func (m structMemberString) structMember() {}
+func (m structMemberInt) structMember() {}
+func (p paramStruct) param() {}
+
+type methodCall struct {
+ XMLName xml.Name `xml:"methodCall"`
+ MethodName string `xml:"methodName"`
+ Params []param `xml:"params"`
+}
+
+// types for XML-RPC responses
+
+type response interface {
+ faultCode() int
+ faultString() string
+}
+
+type responseFault struct {
+ FaultCode int `xml:"fault>value>struct>member>value>int"`
+ FaultString string `xml:"fault>value>struct>member>value>string"`
+}
+
+func (r responseFault) faultCode() int { return r.FaultCode }
+func (r responseFault) faultString() string { return r.FaultString }
+
+type responseStruct struct {
+ responseFault
+ StructMembers []struct {
+ Name string `xml:"name"`
+ ValueInt int `xml:"value>int"`
+ } `xml:"params>param>value>struct>member"`
+}
+
+type responseInt struct {
+ responseFault
+ Value int `xml:"params>param>value>int"`
+}
+
+type responseBool struct {
+ responseFault
+ Value bool `xml:"params>param>value>boolean"`
+}
+
+// POSTing/Marshalling/Unmarshalling
+
+type rpcError struct {
+ faultCode int
+ faultString string
+}
+
+func (e rpcError) Error() string {
+ return fmt.Sprintf(
+ "Gandi DNS: RPC Error: (%d) %s", e.faultCode, e.faultString)
+}
+
+func httpPost(url string, bodyType string, body io.Reader) ([]byte, error) {
+ client := http.Client{Timeout: 60 * time.Second}
+ resp, err := client.Post(url, bodyType, body)
+ if err != nil {
+ return nil, fmt.Errorf("Gandi DNS: HTTP Post Error: %v", err)
+ }
+ defer resp.Body.Close()
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("Gandi DNS: HTTP Post Error: %v", err)
+ }
+ return b, nil
+}
+
+// rpcCall makes an XML-RPC call to Gandi's RPC endpoint by
+// marshalling the data given in the call argument to XML and sending
+// that via HTTP Post to Gandi. The response is then unmarshalled into
+// the resp argument.
+func rpcCall(call *methodCall, resp response) error {
+ // marshal
+ b, err := xml.MarshalIndent(call, "", " ")
+ if err != nil {
+ return fmt.Errorf("Gandi DNS: Marshal Error: %v", err)
+ }
+ // post
+ b = append([]byte(`<?xml version="1.0"?>`+"\n"), b...)
+ respBody, err := httpPost(endpoint, "text/xml", bytes.NewReader(b)) + if err != nil { + return err + } + // unmarshal + err = xml.Unmarshal(respBody, resp) + if err != nil { + return fmt.Errorf("Gandi DNS: Unmarshal Error: %v", err) + } + if resp.faultCode() != 0 { + return rpcError{ + faultCode: resp.faultCode(), faultString: resp.faultString()} + } + return nil +} + +// functions to perform API actions + +func (d *DNSProvider) getZoneID(domain string) (int, error) { + resp := &responseStruct{} + err := rpcCall(&methodCall{ + MethodName: "domain.info", + Params: []param{ + paramString{Value: d.apiKey}, + paramString{Value: domain}, + }, + }, resp) + if err != nil { + return 0, err + } + var zoneID int + for _, member := range resp.StructMembers { + if member.Name == "zone_id" { + zoneID = member.ValueInt + } + } + if zoneID == 0 { + return 0, fmt.Errorf( + "Gandi DNS: Could not determine zone_id for %s", domain) + } + return zoneID, nil +} + +func (d *DNSProvider) cloneZone(zoneID int, name string) (int, error) { + resp := &responseStruct{} + err := rpcCall(&methodCall{ + MethodName: "domain.zone.clone", + Params: []param{ + paramString{Value: d.apiKey}, + paramInt{Value: zoneID}, + paramInt{Value: 0}, + paramStruct{ + StructMembers: []structMember{ + structMemberString{ + Name: "name", + Value: name, + }}, + }, + }, + }, resp) + if err != nil { + return 0, err + } + var newZoneID int + for _, member := range resp.StructMembers { + if member.Name == "id" { + newZoneID = member.ValueInt + } + } + if newZoneID == 0 { + return 0, fmt.Errorf("Gandi DNS: Could not determine cloned zone_id") + } + return newZoneID, nil +} + +func (d *DNSProvider) newZoneVersion(zoneID int) (int, error) { + resp := &responseInt{} + err := rpcCall(&methodCall{ + MethodName: "domain.zone.version.new", + Params: []param{ + paramString{Value: d.apiKey}, + paramInt{Value: zoneID}, + }, + }, resp) + if err != nil { + return 0, err + } + if resp.Value == 0 { + return 0, fmt.Errorf("Gandi DNS: Could not create new zone version") + } + return resp.Value, nil +} + +func (d *DNSProvider) addTXTRecord(zoneID int, version int, name string, value string, ttl int) error { + resp := &responseStruct{} + err := rpcCall(&methodCall{ + MethodName: "domain.zone.record.add", + Params: []param{ + paramString{Value: d.apiKey}, + paramInt{Value: zoneID}, + paramInt{Value: version}, + paramStruct{ + StructMembers: []structMember{ + structMemberString{ + Name: "type", + Value: "TXT", + }, structMemberString{ + Name: "name", + Value: name, + }, structMemberString{ + Name: "value", + Value: value, + }, structMemberInt{ + Name: "ttl", + Value: ttl, + }}, + }, + }, + }, resp) + if err != nil { + return err + } + return nil +} + +func (d *DNSProvider) setZoneVersion(zoneID int, version int) error { + resp := &responseBool{} + err := rpcCall(&methodCall{ + MethodName: "domain.zone.version.set", + Params: []param{ + paramString{Value: d.apiKey}, + paramInt{Value: zoneID}, + paramInt{Value: version}, + }, + }, resp) + if err != nil { + return err + } + if !resp.Value { + return fmt.Errorf("Gandi DNS: could not set zone version") + } + return nil +} + +func (d *DNSProvider) setZone(domain string, zoneID int) error { + resp := &responseStruct{} + err := rpcCall(&methodCall{ + MethodName: "domain.zone.set", + Params: []param{ + paramString{Value: d.apiKey}, + paramString{Value: domain}, + paramInt{Value: zoneID}, + }, + }, resp) + if err != nil { + return err + } + var respZoneID int + for _, member := range resp.StructMembers { + if member.Name 
== "zone_id" { + respZoneID = member.ValueInt + } + } + if respZoneID != zoneID { + return fmt.Errorf( + "Gandi DNS: Could not set new zone_id for %s", domain) + } + return nil +} + +func (d *DNSProvider) deleteZone(zoneID int) error { + resp := &responseBool{} + err := rpcCall(&methodCall{ + MethodName: "domain.zone.delete", + Params: []param{ + paramString{Value: d.apiKey}, + paramInt{Value: zoneID}, + }, + }, resp) + if err != nil { + return err + } + if !resp.Value { + return fmt.Errorf("Gandi DNS: could not delete zone_id") + } + return nil +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go new file mode 100644 index 000000000000..f50260564171 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/googlecloud/googlecloud.go @@ -0,0 +1,155 @@ +// Package googlecloud implements a DNS provider for solving the DNS-01 +// challenge using Google Cloud DNS. +package googlecloud + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/xenolf/lego/acme" + + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + + "google.golang.org/api/dns/v1" +) + +// DNSProvider is an implementation of the DNSProvider interface. +type DNSProvider struct { + project string + client *dns.Service +} + +// NewDNSProvider returns a DNSProvider instance configured for Google Cloud +// DNS. Credentials must be passed in the environment variable: GCE_PROJECT. +func NewDNSProvider() (*DNSProvider, error) { + project := os.Getenv("GCE_PROJECT") + return NewDNSProviderCredentials(project) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for Google Cloud DNS. +func NewDNSProviderCredentials(project string) (*DNSProvider, error) { + if project == "" { + return nil, fmt.Errorf("Google Cloud project name missing") + } + + client, err := google.DefaultClient(context.Background(), dns.NdevClouddnsReadwriteScope) + if err != nil { + return nil, fmt.Errorf("Unable to get Google Cloud client: %v", err) + } + svc, err := dns.New(client) + if err != nil { + return nil, fmt.Errorf("Unable to create Google Cloud DNS service: %v", err) + } + return &DNSProvider{ + project: project, + client: svc, + }, nil +} + +// Present creates a TXT record to fulfil the dns-01 challenge. +func (c *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + + zone, err := c.getHostedZone(domain) + if err != nil { + return err + } + + rec := &dns.ResourceRecordSet{ + Name: fqdn, + Rrdatas: []string{value}, + Ttl: int64(ttl), + Type: "TXT", + } + change := &dns.Change{ + Additions: []*dns.ResourceRecordSet{rec}, + } + + chg, err := c.client.Changes.Create(c.project, zone, change).Do() + if err != nil { + return err + } + + // wait for change to be acknowledged + for chg.Status == "pending" { + time.Sleep(time.Second) + + chg, err = c.client.Changes.Get(c.project, zone, chg.Id).Do() + if err != nil { + return err + } + } + + return nil +} + +// CleanUp removes the TXT record matching the specified parameters. 
+func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) + + zone, err := c.getHostedZone(domain) + if err != nil { + return err + } + + records, err := c.findTxtRecords(zone, fqdn) + if err != nil { + return err + } + + for _, rec := range records { + change := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{rec}, + } + _, err = c.client.Changes.Create(c.project, zone, change).Do() + if err != nil { + return err + } + } + return nil +} + +// Timeout customizes the timeout values used by the ACME package for checking +// DNS record validity. +func (c *DNSProvider) Timeout() (timeout, interval time.Duration) { + return 180 * time.Second, 5 * time.Second +} + +// getHostedZone returns the managed-zone +func (c *DNSProvider) getHostedZone(domain string) (string, error) { + + zones, err := c.client.ManagedZones.List(c.project).Do() + if err != nil { + return "", fmt.Errorf("GoogleCloud API call failed: %v", err) + } + + for _, z := range zones.ManagedZones { + if strings.HasSuffix(domain+".", z.DnsName) { + return z.Name, nil + } + } + + return "", fmt.Errorf("No matching GoogleCloud domain found for domain %s", domain) + +} + +func (c *DNSProvider) findTxtRecords(zone, fqdn string) ([]*dns.ResourceRecordSet, error) { + + recs, err := c.client.ResourceRecordSets.List(c.project, zone).Do() + if err != nil { + return nil, err + } + + found := []*dns.ResourceRecordSet{} + for _, r := range recs.Rrsets { + if r.Type == "TXT" && r.Name == fqdn { + found = append(found, r) + } + } + + return found, nil +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go b/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go new file mode 100644 index 000000000000..d7eb40935819 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/namecheap/namecheap.go @@ -0,0 +1,416 @@ +// Package namecheap implements a DNS provider for solving the DNS-01 +// challenge using namecheap DNS. +package namecheap + +import ( + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/xenolf/lego/acme" +) + +// Notes about namecheap's tool API: +// 1. Using the API requires registration. Once registered, use your account +// name and API key to access the API. +// 2. There is no API to add or modify a single DNS record. Instead you must +// read the entire list of records, make modifications, and then write the +// entire updated list of records. (Yuck.) +// 3. Namecheap's DNS updates can be slow to propagate. I've seen them take +// as long as an hour. +// 4. Namecheap requires you to whitelist the IP address from which you call +// its APIs. It also requires all API calls to include the whitelisted IP +// address as a form or query string value. This code uses a namecheap +// service to query the client's IP address. + +var ( + debug = false + defaultBaseURL = "https://api.namecheap.com/xml.response" + getIPURL = "https://dynamicdns.park-your-domain.com/getip" + httpClient = http.Client{Timeout: 60 * time.Second} +) + +// DNSProvider is an implementation of the ChallengeProviderTimeout interface +// that uses Namecheap's tool API to manage TXT records for a domain. +type DNSProvider struct { + baseURL string + apiUser string + apiKey string + clientIP string +} + +// NewDNSProvider returns a DNSProvider instance configured for namecheap. +// Credentials must be passed in the environment variables: NAMECHEAP_API_USER +// and NAMECHEAP_API_KEY. 
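A construction sketch for this provider (assumed usage; credentials are placeholders). Constructing it already performs one outbound request, because getClientIP must discover the public IP address that Namecheap requires on every API call:

package main

import (
	"log"

	"github.com/xenolf/lego/providers/dns/namecheap"
)

func main() {
	provider, err := namecheap.NewDNSProviderCredentials("apiuser", "apikey") // placeholders
	if err != nil {
		log.Fatal(err)
	}
	_ = provider
}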
+func NewDNSProvider() (*DNSProvider, error) { + apiUser := os.Getenv("NAMECHEAP_API_USER") + apiKey := os.Getenv("NAMECHEAP_API_KEY") + return NewDNSProviderCredentials(apiUser, apiKey) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for namecheap. +func NewDNSProviderCredentials(apiUser, apiKey string) (*DNSProvider, error) { + if apiUser == "" || apiKey == "" { + return nil, fmt.Errorf("Namecheap credentials missing") + } + + clientIP, err := getClientIP() + if err != nil { + return nil, err + } + + return &DNSProvider{ + baseURL: defaultBaseURL, + apiUser: apiUser, + apiKey: apiKey, + clientIP: clientIP, + }, nil +} + +// Timeout returns the timeout and interval to use when checking for DNS +// propagation. Namecheap can sometimes take a long time to complete an +// update, so wait up to 60 minutes for the update to propagate. +func (d *DNSProvider) Timeout() (timeout, interval time.Duration) { + return 60 * time.Minute, 15 * time.Second +} + +// host describes a DNS record returned by the Namecheap DNS gethosts API. +// Namecheap uses the term "host" to refer to all DNS records that include +// a host field (A, AAAA, CNAME, NS, TXT, URL). +type host struct { + Type string `xml:",attr"` + Name string `xml:",attr"` + Address string `xml:",attr"` + MXPref string `xml:",attr"` + TTL string `xml:",attr"` +} + +// apierror describes an error record in a namecheap API response. +type apierror struct { + Number int `xml:",attr"` + Description string `xml:",innerxml"` +} + +// getClientIP returns the client's public IP address. It uses namecheap's +// IP discovery service to perform the lookup. +func getClientIP() (addr string, err error) { + resp, err := httpClient.Get(getIPURL) + if err != nil { + return "", err + } + defer resp.Body.Close() + + clientIP, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + if debug { + fmt.Println("Client IP:", string(clientIP)) + } + return string(clientIP), nil +} + +// A challenge repesents all the data needed to specify a dns-01 challenge +// to lets-encrypt. +type challenge struct { + domain string + key string + keyFqdn string + keyValue string + tld string + sld string + host string +} + +// newChallenge builds a challenge record from a domain name, a challenge +// authentication key, and a map of available TLDs. +func newChallenge(domain, keyAuth string, tlds map[string]string) (*challenge, error) { + domain = acme.UnFqdn(domain) + parts := strings.Split(domain, ".") + + // Find the longest matching TLD. + longest := -1 + for i := len(parts); i > 0; i-- { + t := strings.Join(parts[i-1:], ".") + if _, found := tlds[t]; found { + longest = i - 1 + } + } + if longest < 1 { + return nil, fmt.Errorf("Invalid domain name '%s'", domain) + } + + tld := strings.Join(parts[longest:], ".") + sld := parts[longest-1] + + var host string + if longest >= 1 { + host = strings.Join(parts[:longest-1], ".") + } + + key, keyValue, _ := acme.DNS01Record(domain, keyAuth) + + return &challenge{ + domain: domain, + key: "_acme-challenge." + host, + keyFqdn: key, + keyValue: keyValue, + tld: tld, + sld: sld, + host: host, + }, nil +} + +// setGlobalParams adds the namecheap global parameters to the provided url +// Values record. 
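For orientation, the query string that getTLDs below builds with this helper looks roughly like the following (placeholder values; url.Values.Encode sorts the keys):

// v := make(url.Values)
// d.setGlobalParams(&v, "namecheap.domains.getTldList")
// v.Encode() now yields something like:
//   ApiKey=apikey&ApiUser=apiuser&ClientIp=203.0.113.7&Command=namecheap.domains.getTldList&UserName=apiuser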
+func (d *DNSProvider) setGlobalParams(v *url.Values, cmd string) { + v.Set("ApiUser", d.apiUser) + v.Set("ApiKey", d.apiKey) + v.Set("UserName", d.apiUser) + v.Set("ClientIp", d.clientIP) + v.Set("Command", cmd) +} + +// getTLDs requests the list of available TLDs from namecheap. +func (d *DNSProvider) getTLDs() (tlds map[string]string, err error) { + values := make(url.Values) + d.setGlobalParams(&values, "namecheap.domains.getTldList") + + reqURL, _ := url.Parse(d.baseURL) + reqURL.RawQuery = values.Encode() + + resp, err := httpClient.Get(reqURL.String()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("getHosts HTTP error %d", resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + type GetTldsResponse struct { + XMLName xml.Name `xml:"ApiResponse"` + Errors []apierror `xml:"Errors>Error"` + Result []struct { + Name string `xml:",attr"` + } `xml:"CommandResponse>Tlds>Tld"` + } + + var gtr GetTldsResponse + if err := xml.Unmarshal(body, &gtr); err != nil { + return nil, err + } + if len(gtr.Errors) > 0 { + return nil, fmt.Errorf("Namecheap error: %s [%d]", + gtr.Errors[0].Description, gtr.Errors[0].Number) + } + + tlds = make(map[string]string) + for _, t := range gtr.Result { + tlds[t.Name] = t.Name + } + return tlds, nil +} + +// getHosts reads the full list of DNS host records using the Namecheap API. +func (d *DNSProvider) getHosts(ch *challenge) (hosts []host, err error) { + values := make(url.Values) + d.setGlobalParams(&values, "namecheap.domains.dns.getHosts") + values.Set("SLD", ch.sld) + values.Set("TLD", ch.tld) + + reqURL, _ := url.Parse(d.baseURL) + reqURL.RawQuery = values.Encode() + + resp, err := httpClient.Get(reqURL.String()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("getHosts HTTP error %d", resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + type GetHostsResponse struct { + XMLName xml.Name `xml:"ApiResponse"` + Status string `xml:"Status,attr"` + Errors []apierror `xml:"Errors>Error"` + Hosts []host `xml:"CommandResponse>DomainDNSGetHostsResult>host"` + } + + var ghr GetHostsResponse + if err = xml.Unmarshal(body, &ghr); err != nil { + return nil, err + } + if len(ghr.Errors) > 0 { + return nil, fmt.Errorf("Namecheap error: %s [%d]", + ghr.Errors[0].Description, ghr.Errors[0].Number) + } + + return ghr.Hosts, nil +} + +// setHosts writes the full list of DNS host records using the Namecheap API.
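// The complete record list is sent as indexed form fields, one group of
// HostNameN/RecordTypeN/AddressN/MXPrefN/TTLN values per record. A rough
// sketch of the posted form for two records (values are placeholders, with
// the second record being the ACME challenge TXT record):
//
//	HostName1=www&RecordType1=CNAME&Address1=example.com.&MXPref1=10&TTL1=1800
//	HostName2=_acme-challenge&RecordType2=TXT&Address2=<challenge value>&MXPref2=10&TTL2=120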
+func (d *DNSProvider) setHosts(ch *challenge, hosts []host) error { + values := make(url.Values) + d.setGlobalParams(&values, "namecheap.domains.dns.setHosts") + values.Set("SLD", ch.sld) + values.Set("TLD", ch.tld) + + for i, h := range hosts { + ind := fmt.Sprintf("%d", i+1) + values.Add("HostName"+ind, h.Name) + values.Add("RecordType"+ind, h.Type) + values.Add("Address"+ind, h.Address) + values.Add("MXPref"+ind, h.MXPref) + values.Add("TTL"+ind, h.TTL) + } + + resp, err := httpClient.PostForm(d.baseURL, values) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + return fmt.Errorf("setHosts HTTP error %d", resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + type SetHostsResponse struct { + XMLName xml.Name `xml:"ApiResponse"` + Status string `xml:"Status,attr"` + Errors []apierror `xml:"Errors>Error"` + Result struct { + IsSuccess string `xml:",attr"` + } `xml:"CommandResponse>DomainDNSSetHostsResult"` + } + + var shr SetHostsResponse + if err := xml.Unmarshal(body, &shr); err != nil { + return err + } + if len(shr.Errors) > 0 { + return fmt.Errorf("Namecheap error: %s [%d]", + shr.Errors[0].Description, shr.Errors[0].Number) + } + if shr.Result.IsSuccess != "true" { + return fmt.Errorf("Namecheap setHosts failed.") + } + + return nil +} + +// addChallengeRecord adds a DNS challenge TXT record to a list of namecheap +// host records. +func (d *DNSProvider) addChallengeRecord(ch *challenge, hosts *[]host) { + host := host{ + Name: ch.key, + Type: "TXT", + Address: ch.keyValue, + MXPref: "10", + TTL: "120", + } + + // If there's already a TXT record with the same name, replace it. + for i, h := range *hosts { + if h.Name == ch.key && h.Type == "TXT" { + (*hosts)[i] = host + return + } + } + + // No record was replaced, so add a new one. + *hosts = append(*hosts, host) +} + +// removeChallengeRecord removes a DNS challenge TXT record from a list of +// namecheap host records. Return true if a record was removed. +func (d *DNSProvider) removeChallengeRecord(ch *challenge, hosts *[]host) bool { + // Find the challenge TXT record and remove it if found. + for i, h := range *hosts { + if h.Name == ch.key && h.Type == "TXT" { + *hosts = append((*hosts)[:i], (*hosts)[i+1:]...) + return true + } + } + + return false +} + +// Present installs a TXT record for the DNS challenge. +func (d *DNSProvider) Present(domain, token, keyAuth string) error { + tlds, err := d.getTLDs() + if err != nil { + return err + } + + ch, err := newChallenge(domain, keyAuth, tlds) + if err != nil { + return err + } + + hosts, err := d.getHosts(ch) + if err != nil { + return err + } + + d.addChallengeRecord(ch, &hosts) + + if debug { + for _, h := range hosts { + fmt.Printf( + "%-5.5s %-30.30s %-6s %-70.70s\n", + h.Type, h.Name, h.TTL, h.Address) + } + } + + return d.setHosts(ch, hosts) +} + +// CleanUp removes a TXT record used for a previous DNS challenge. 
+func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { + tlds, err := d.getTLDs() + if err != nil { + return err + } + + ch, err := newChallenge(domain, keyAuth, tlds) + if err != nil { + return err + } + + hosts, err := d.getHosts(ch) + if err != nil { + return err + } + + if removed := d.removeChallengeRecord(ch, &hosts); !removed { + return nil + } + + return d.setHosts(ch, hosts) +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go new file mode 100644 index 000000000000..43a95f18c3db --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/rfc2136/rfc2136.go @@ -0,0 +1,129 @@ +// Package rfc2136 implements a DNS provider for solving the DNS-01 challenge +// using the rfc2136 dynamic update. +package rfc2136 + +import ( + "fmt" + "net" + "os" + "strings" + "time" + + "github.com/miekg/dns" + "github.com/xenolf/lego/acme" +) + +// DNSProvider is an implementation of the acme.ChallengeProvider interface that +// uses dynamic DNS updates (RFC 2136) to create TXT records on a nameserver. +type DNSProvider struct { + nameserver string + tsigAlgorithm string + tsigKey string + tsigSecret string +} + +// NewDNSProvider returns a DNSProvider instance configured for rfc2136 +// dynamic update. Credentials must be passed in the environment variables: +// RFC2136_NAMESERVER, RFC2136_TSIG_ALGORITHM, RFC2136_TSIG_KEY and +// RFC2136_TSIG_SECRET. To disable TSIG authentication, leave the TSIG +// variables unset. RFC2136_NAMESERVER must be a network address in the form +// "host" or "host:port". +func NewDNSProvider() (*DNSProvider, error) { + nameserver := os.Getenv("RFC2136_NAMESERVER") + tsigAlgorithm := os.Getenv("RFC2136_TSIG_ALGORITHM") + tsigKey := os.Getenv("RFC2136_TSIG_KEY") + tsigSecret := os.Getenv("RFC2136_TSIG_SECRET") + return NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a +// DNSProvider instance configured for rfc2136 dynamic update. To disable TSIG +// authentication, leave the TSIG parameters as empty strings. +// nameserver must be a network address in the form "host" or "host:port". +func NewDNSProviderCredentials(nameserver, tsigAlgorithm, tsigKey, tsigSecret string) (*DNSProvider, error) { + if nameserver == "" { + return nil, fmt.Errorf("RFC2136 nameserver missing") + } + + // Append the default DNS port if none is specified. 
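	// For example (illustrative): a nameserver given as "ns1.example.com" is
	// rewritten to "ns1.example.com:53", while one that already includes a
	// port, such as "ns1.example.com:5353", is left unchanged.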
+ if _, _, err := net.SplitHostPort(nameserver); err != nil { + if strings.Contains(err.Error(), "missing port") { + nameserver = net.JoinHostPort(nameserver, "53") + } else { + return nil, err + } + } + d := &DNSProvider{ + nameserver: nameserver, + } + if tsigAlgorithm == "" { + tsigAlgorithm = dns.HmacMD5 + } + d.tsigAlgorithm = tsigAlgorithm + if len(tsigKey) > 0 && len(tsigSecret) > 0 { + d.tsigKey = tsigKey + d.tsigSecret = tsigSecret + } + + return d, nil +} + +// Present creates a TXT record using the specified parameters +func (r *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + return r.changeRecord("INSERT", fqdn, value, ttl) +} + +// CleanUp removes the TXT record matching the specified parameters +func (r *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + return r.changeRecord("REMOVE", fqdn, value, ttl) +} + +func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error { + // Find the zone for the given fqdn + zone, err := acme.FindZoneByFqdn(fqdn, []string{r.nameserver}) + if err != nil { + return err + } + + // Create RR + rr := new(dns.TXT) + rr.Hdr = dns.RR_Header{Name: fqdn, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: uint32(ttl)} + rr.Txt = []string{value} + rrs := []dns.RR{rr} + + // Create dynamic update packet + m := new(dns.Msg) + m.SetUpdate(zone) + switch action { + case "INSERT": + // Always remove old challenge left over from who knows what. + m.RemoveRRset(rrs) + m.Insert(rrs) + case "REMOVE": + m.Remove(rrs) + default: + return fmt.Errorf("Unexpected action: %s", action) + } + + // Setup client + c := new(dns.Client) + c.SingleInflight = true + // TSIG authentication / msg signing + if len(r.tsigKey) > 0 && len(r.tsigSecret) > 0 { + m.SetTsig(dns.Fqdn(r.tsigKey), r.tsigAlgorithm, 300, time.Now().Unix()) + c.TsigSecret = map[string]string{dns.Fqdn(r.tsigKey): r.tsigSecret} + } + + // Send the query + reply, _, err := c.Exchange(m, r.nameserver) + if err != nil { + return fmt.Errorf("DNS update failed: %v", err) + } + if reply != nil && reply.Rcode != dns.RcodeSuccess { + return fmt.Errorf("DNS update failed. Server replied: %s", dns.RcodeToString[reply.Rcode]) + } + + return nil +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go b/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go new file mode 100644 index 000000000000..f8a4bc2b8120 --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/route53/route53.go @@ -0,0 +1,170 @@ +// Package route53 implements a DNS provider for solving the DNS-01 challenge +// using AWS Route 53 DNS. +package route53 + +import ( + "fmt" + "math/rand" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/xenolf/lego/acme" +) + +const ( + maxRetries = 5 +) + +// DNSProvider implements the acme.ChallengeProvider interface +type DNSProvider struct { + client *route53.Route53 +} + +// customRetryer implements the client.Retryer interface by composing the +// DefaultRetryer. It controls the logic for retrying recoverable request +// errors (e.g. when rate limits are exceeded). +type customRetryer struct { + client.DefaultRetryer +} + +// RetryRules overwrites the DefaultRetryer's method. 
+// It uses a basic exponential backoff algorithm that returns an initial +// delay of ~400ms with an upper limit of ~30 seconds which should prevent +// causing a high number of consecutive throttling errors. +// For reference: Route 53 enforces an account-wide(!) 5req/s query limit. +func (d customRetryer) RetryRules(r *request.Request) time.Duration { + retryCount := r.RetryCount + if retryCount > 7 { + retryCount = 7 + } + + delay := (1 << uint(retryCount)) * (rand.Intn(50) + 200) + return time.Duration(delay) * time.Millisecond +} + +// NewDNSProvider returns a DNSProvider instance configured for the AWS +// Route 53 service. +// +// AWS Credentials are automatically detected in the following locations +// and prioritized in the following order: +// 1. Environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, +// AWS_REGION, [AWS_SESSION_TOKEN] +// 2. Shared credentials file (defaults to ~/.aws/credentials) +// 3. Amazon EC2 IAM role +// +// See also: https://github.com/aws/aws-sdk-go/wiki/configuring-sdk +func NewDNSProvider() (*DNSProvider, error) { + r := customRetryer{} + r.NumMaxRetries = maxRetries + config := request.WithRetryer(aws.NewConfig(), r) + client := route53.New(session.New(config)) + + return &DNSProvider{client: client}, nil +} + +// Present creates a TXT record using the specified parameters +func (r *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + value = `"` + value + `"` + return r.changeRecord("UPSERT", fqdn, value, ttl) +} + +// CleanUp removes the TXT record matching the specified parameters +func (r *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + value = `"` + value + `"` + return r.changeRecord("DELETE", fqdn, value, ttl) +} + +func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error { + hostedZoneID, err := r.getHostedZoneID(fqdn) + if err != nil { + return fmt.Errorf("Failed to determine Route 53 hosted zone ID: %v", err) + } + + recordSet := newTXTRecordSet(fqdn, value, ttl) + reqParams := &route53.ChangeResourceRecordSetsInput{ + HostedZoneId: aws.String(hostedZoneID), + ChangeBatch: &route53.ChangeBatch{ + Comment: aws.String("Managed by Lego"), + Changes: []*route53.Change{ + { + Action: aws.String(action), + ResourceRecordSet: recordSet, + }, + }, + }, + } + + resp, err := r.client.ChangeResourceRecordSets(reqParams) + if err != nil { + return fmt.Errorf("Failed to change Route 53 record set: %v", err) + } + + statusID := resp.ChangeInfo.Id + + return acme.WaitFor(120*time.Second, 4*time.Second, func() (bool, error) { + reqParams := &route53.GetChangeInput{ + Id: statusID, + } + resp, err := r.client.GetChange(reqParams) + if err != nil { + return false, fmt.Errorf("Failed to query Route 53 change status: %v", err) + } + if *resp.ChangeInfo.Status == route53.ChangeStatusInsync { + return true, nil + } + return false, nil + }) +} + +func (r *DNSProvider) getHostedZoneID(fqdn string) (string, error) { + authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) + if err != nil { + return "", err + } + + // .DNSName should not have a trailing dot + reqParams := &route53.ListHostedZonesByNameInput{ + DNSName: aws.String(acme.UnFqdn(authZone)), + } + resp, err := r.client.ListHostedZonesByName(reqParams) + if err != nil { + return "", err + } + + var hostedZoneID string + for _, hostedZone := range resp.HostedZones { + // .Name has a trailing dot + if 
!*hostedZone.Config.PrivateZone && *hostedZone.Name == authZone { + hostedZoneID = *hostedZone.Id + break + } + } + + if len(hostedZoneID) == 0 { + return "", fmt.Errorf("Zone %s not found in Route 53 for domain %s", authZone, fqdn) + } + + if strings.HasPrefix(hostedZoneID, "/hostedzone/") { + hostedZoneID = strings.TrimPrefix(hostedZoneID, "/hostedzone/") + } + + return hostedZoneID, nil +} + +func newTXTRecordSet(fqdn, value string, ttl int) *route53.ResourceRecordSet { + return &route53.ResourceRecordSet{ + Name: aws.String(fqdn), + Type: aws.String("TXT"), + TTL: aws.Int64(int64(ttl)), + ResourceRecords: []*route53.ResourceRecord{ + {Value: aws.String(value)}, + }, + } +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go new file mode 100644 index 000000000000..53804e2703cf --- /dev/null +++ b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go @@ -0,0 +1,127 @@ +// Package vultr implements a DNS provider for solving the DNS-01 challenge using +// the vultr DNS. +// See https://www.vultr.com/api/#dns +package vultr + +import ( + "fmt" + "os" + "strings" + + vultr "github.com/JamesClonk/vultr/lib" + "github.com/xenolf/lego/acme" +) + +// DNSProvider is an implementation of the acme.ChallengeProvider interface. +type DNSProvider struct { + client *vultr.Client +} + +// NewDNSProvider returns a DNSProvider instance with a configured Vultr client. +// Authentication uses the VULTR_API_KEY environment variable. +func NewDNSProvider() (*DNSProvider, error) { + apiKey := os.Getenv("VULTR_API_KEY") + return NewDNSProviderCredentials(apiKey) +} + +// NewDNSProviderCredentials uses the supplied credentials to return a DNSProvider +// instance configured for Vultr. +func NewDNSProviderCredentials(apiKey string) (*DNSProvider, error) { + if apiKey == "" { + return nil, fmt.Errorf("Vultr credentials missing") + } + + c := &DNSProvider{ + client: vultr.NewClient(apiKey, nil), + } + + return c, nil +} + +// Present creates a TXT record to fulfil the DNS-01 challenge. +func (c *DNSProvider) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := acme.DNS01Record(domain, keyAuth) + + zoneDomain, err := c.getHostedZone(domain) + if err != nil { + return err + } + + name := c.extractRecordName(fqdn, zoneDomain) + + err = c.client.CreateDnsRecord(zoneDomain, name, "TXT", `"`+value+`"`, 0, ttl) + if err != nil { + return fmt.Errorf("Vultr API call failed: %v", err) + } + + return nil +} + +// CleanUp removes the TXT record matching the specified parameters. 
+func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error { + fqdn, _, _ := acme.DNS01Record(domain, keyAuth) + + zoneDomain, records, err := c.findTxtRecords(domain, fqdn) + if err != nil { + return err + } + + for _, rec := range records { + err := c.client.DeleteDnsRecord(zoneDomain, rec.RecordID) + if err != nil { + return err + } + } + return nil +} + +func (c *DNSProvider) getHostedZone(domain string) (string, error) { + domains, err := c.client.GetDnsDomains() + if err != nil { + return "", fmt.Errorf("Vultr API call failed: %v", err) + } + + var hostedDomain vultr.DnsDomain + for _, d := range domains { + if strings.HasSuffix(domain, d.Domain) { + if len(d.Domain) > len(hostedDomain.Domain) { + hostedDomain = d + } + } + } + if hostedDomain.Domain == "" { + return "", fmt.Errorf("No matching Vultr domain found for domain %s", domain) + } + + return hostedDomain.Domain, nil +} + +func (c *DNSProvider) findTxtRecords(domain, fqdn string) (string, []vultr.DnsRecord, error) { + zoneDomain, err := c.getHostedZone(domain) + if err != nil { + return "", nil, err + } + + var records []vultr.DnsRecord + result, err := c.client.GetDnsRecords(zoneDomain) + if err != nil { + return "", records, fmt.Errorf("Vultr API call has failed: %v", err) + } + + recordName := c.extractRecordName(fqdn, zoneDomain) + for _, record := range result { + if record.Type == "TXT" && record.Name == recordName { + records = append(records, record) + } + } + + return zoneDomain, records, nil +} + +func (c *DNSProvider) extractRecordName(fqdn, domain string) string { + name := acme.UnFqdn(fqdn) + if idx := strings.Index(name, "."+domain); idx != -1 { + return name[:idx] + } + return name +} diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go new file mode 100644 index 000000000000..6bfbd5da9fef --- /dev/null +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -0,0 +1,673 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses +// are signed messages attesting to the validity of a certificate for a small +// period of time. This is used to manage revocation for X.509 certificates. +package ocsp // import "golang.org/x/crypto/ocsp" + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "math/big" + "strconv" + "time" +) + +var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) + +// ResponseStatus contains the result of an OCSP request. See +// https://tools.ietf.org/html/rfc6960#section-2.3 +type ResponseStatus int + +const ( + Success ResponseStatus = 0 + Malformed ResponseStatus = 1 + InternalError ResponseStatus = 2 + TryLater ResponseStatus = 3 + // Status code four is ununsed in OCSP. 
See + // https://tools.ietf.org/html/rfc6960#section-4.2.1 + SignatureRequired ResponseStatus = 5 + Unauthorized ResponseStatus = 6 +) + +func (r ResponseStatus) String() string { + switch r { + case Success: + return "success" + case Malformed: + return "malformed" + case InternalError: + return "internal error" + case TryLater: + return "try later" + case SignatureRequired: + return "signature required" + case Unauthorized: + return "unauthorized" + default: + return "unknown OCSP status: " + strconv.Itoa(int(r)) + } +} + +// ResponseError is an error that may be returned by ParseResponse to indicate +// that the response itself is an error, not just that its indicating that a +// certificate is revoked, unknown, etc. +type ResponseError struct { + Status ResponseStatus +} + +func (r ResponseError) Error() string { + return "ocsp: error from server: " + r.Status.String() +} + +// These are internal structures that reflect the ASN.1 structure of an OCSP +// response. See RFC 2560, section 4.2. + +type certID struct { + HashAlgorithm pkix.AlgorithmIdentifier + NameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// https://tools.ietf.org/html/rfc2560#section-4.1.1 +type ocspRequest struct { + TBSRequest tbsRequest +} + +type tbsRequest struct { + Version int `asn1:"explicit,tag:0,default:0,optional"` + RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` + RequestList []request +} + +type request struct { + Cert certID +} + +type responseASN1 struct { + Status asn1.Enumerated + Response responseBytes `asn1:"explicit,tag:0,optional"` +} + +type responseBytes struct { + ResponseType asn1.ObjectIdentifier + Response []byte +} + +type basicResponse struct { + TBSResponseData responseData + SignatureAlgorithm pkix.AlgorithmIdentifier + Signature asn1.BitString + Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` +} + +type responseData struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:1,explicit,tag:0"` + RawResponderName asn1.RawValue `asn1:"optional,explicit,tag:1"` + KeyHash []byte `asn1:"optional,explicit,tag:2"` + ProducedAt time.Time `asn1:"generalized"` + Responses []singleResponse +} + +type singleResponse struct { + CertID certID + Good asn1.Flag `asn1:"tag:0,optional"` + Revoked revokedInfo `asn1:"tag:1,optional"` + Unknown asn1.Flag `asn1:"tag:2,optional"` + ThisUpdate time.Time `asn1:"generalized"` + NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` + SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"` +} + +type revokedInfo struct { + RevocationTime time.Time `asn1:"generalized"` + Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"` +} + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 
4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} +) + +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +var signatureAlgorithmDetails = []struct { + algo x509.SignatureAlgorithm + oid asn1.ObjectIdentifier + pubKeyAlgo x509.PublicKeyAlgorithm + hash crypto.Hash +}{ + {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, + {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, + {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, + {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, + {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, + {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, + {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, + {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, + {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { + var pubType x509.PublicKeyAlgorithm + + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = x509.RSA + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureSHA256WithRSA + sigAlgo.Parameters = asn1.RawValue{ + Tag: 5, + } + + case *ecdsa.PublicKey: + pubType = x509.ECDSA + + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + + default: + err = errors.New("x509: only RSA and ECDSA keys supported") + } + + if err != nil { + return + } + + if requestedSigAlgo == 0 { + return + } + + found := false + for _, details := range signatureAlgorithmDetails { + if details.algo == requestedSigAlgo { + if details.pubKeyAlgo != pubType { + err = errors.New("x509: requested SignatureAlgorithm does not match private key type") + return + } + sigAlgo.Algorithm, hashFunc = details.oid, details.hash + if hashFunc == 0 { + err = errors.New("x509: cannot sign with hash function requested") + return + } + found = true + break + } + } + + if !found { + err = errors.New("x509: unknown SignatureAlgorithm") + } + + return +} + +// TODO(agl): this is taken from crypto/x509 and so should probably be exported +// from crypto/x509 or crypto/x509/pkix. 
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { + for _, details := range signatureAlgorithmDetails { + if oid.Equal(details.oid) { + return details.algo + } + } + return x509.UnknownSignatureAlgorithm +} + +// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. +func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target) { + return hash + } + } + return crypto.Hash(0) +} + +// This is the exposed reflection of the internal OCSP structures. + +// The status values that can be expressed in OCSP. See RFC 6960. +const ( + // Good means that the certificate is valid. + Good = iota + // Revoked means that the certificate has been deliberately revoked. + Revoked + // Unknown means that the OCSP responder doesn't know about the certificate. + Unknown + // ServerFailed is unused and was never used (see + // https://go-review.googlesource.com/#/c/18944). ParseResponse will + // return a ResponseError when an error response is parsed. + ServerFailed +) + +// The enumerated reasons for revoking a certificate. See RFC 5280. +const ( + Unspecified = iota + KeyCompromise = iota + CACompromise = iota + AffiliationChanged = iota + Superseded = iota + CessationOfOperation = iota + CertificateHold = iota + _ = iota + RemoveFromCRL = iota + PrivilegeWithdrawn = iota + AACompromise = iota +) + +// Request represents an OCSP request. See RFC 6960. +type Request struct { + HashAlgorithm crypto.Hash + IssuerNameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// Response represents an OCSP response containing a single SingleResponse. See +// RFC 6960. +type Response struct { + // Status is one of {Good, Revoked, Unknown} + Status int + SerialNumber *big.Int + ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time + RevocationReason int + Certificate *x509.Certificate + // TBSResponseData contains the raw bytes of the signed response. If + // Certificate is nil then this can be used to verify Signature. + TBSResponseData []byte + Signature []byte + SignatureAlgorithm x509.SignatureAlgorithm + + // Extensions contains raw X.509 extensions from the singleExtensions field + // of the OCSP response. When parsing certificates, this can be used to + // extract non-critical extensions that are not parsed by this package. When + // marshaling OCSP responses, the Extensions field is ignored, see + // ExtraExtensions. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any marshaled + // OCSP response (in the singleExtensions field). Values override any + // extensions that would otherwise be produced based on the other fields. The + // ExtraExtensions field is not populated when parsing certificates, see + // Extensions. + ExtraExtensions []pkix.Extension +} + +// These are pre-serialized error responses for the various non-success codes +// defined by OCSP. The Unauthorized code in particular can be used by an OCSP +// responder that supports only pre-signed responses as a response to requests +// for certificates with unknown status. See RFC 5019. 
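// Each value below is a complete DER-encoded OCSPResponse carrying only a
// responseStatus: 0x30 0x03 is a SEQUENCE of length 3, 0x0A 0x01 is an
// ENUMERATED of length 1, and the final byte is the status code itself. As a
// rough sketch, one of them can be decoded with the internal structures above:
//
//	var r responseASN1
//	if _, err := asn1.Unmarshal(UnauthorizedErrorResponse, &r); err != nil {
//		// handle error
//	}
//	// r.Status is 6 (Unauthorized); the optional Response field is absent.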
+var ( + MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} + InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} + TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} + SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} + UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} +) + +// CheckSignatureFrom checks that the signature in resp is a valid signature +// from issuer. This should only be used if resp.Certificate is nil. Otherwise, +// the OCSP response contained an intermediate certificate that created the +// signature. That signature is checked by ParseResponse and only +// resp.Certificate remains to be validated. +func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { + return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) +} + +// ParseError results from an invalid OCSP response. +type ParseError string + +func (p ParseError) Error() string { + return string(p) +} + +// ParseRequest parses an OCSP request in DER form. It only supports +// requests for a single certificate. Signed requests are not supported. +// If a request includes a signature, it will result in a ParseError. +func ParseRequest(bytes []byte) (*Request, error) { + var req ocspRequest + rest, err := asn1.Unmarshal(bytes, &req) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP request") + } + + if len(req.TBSRequest.RequestList) == 0 { + return nil, ParseError("OCSP request contains no request body") + } + innerRequest := req.TBSRequest.RequestList[0] + + hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) + if hashFunc == crypto.Hash(0) { + return nil, ParseError("OCSP request uses unknown hash function") + } + + return &Request{ + HashAlgorithm: hashFunc, + IssuerNameHash: innerRequest.Cert.NameHash, + IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, + SerialNumber: innerRequest.Cert.SerialNumber, + }, nil +} + +// ParseResponse parses an OCSP response in DER form. It only supports +// responses for a single certificate. If the response contains a certificate +// then the signature over the response is checked. If issuer is not nil then +// it will be used to validate the signature or embedded certificate. +// +// Invalid signatures or parse failures will result in a ParseError. Error +// responses will result in a ResponseError. 
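// A caller distinguishing these outcomes might do so roughly as follows (a
// sketch only; der and issuer are assumed to hold a DER-encoded response and
// the issuer certificate):
//
//	resp, err := ParseResponse(der, issuer)
//	switch e := err.(type) {
//	case nil:
//		// resp.Status is one of Good, Revoked or Unknown.
//	case ResponseError:
//		// The responder returned a non-success status, e.g. TryLater.
//		_ = e.Status
//	case ParseError:
//		// The response was malformed or a signature check failed.
//	default:
//		// Lower-level ASN.1 or certificate parsing error.
//	}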
+func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { + var resp responseASN1 + rest, err := asn1.Unmarshal(bytes, &resp) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP response") + } + + if status := ResponseStatus(resp.Status); status != Success { + return nil, ResponseError{status} + } + + if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { + return nil, ParseError("bad OCSP response type") + } + + var basicResp basicResponse + rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) + if err != nil { + return nil, err + } + + if len(basicResp.Certificates) > 1 { + return nil, ParseError("OCSP response contains bad number of certificates") + } + + if len(basicResp.TBSResponseData.Responses) != 1 { + return nil, ParseError("OCSP response contains bad number of responses") + } + + ret := &Response{ + TBSResponseData: basicResp.TBSResponseData.Raw, + Signature: basicResp.Signature.RightAlign(), + SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm), + } + + if len(basicResp.Certificates) > 0 { + ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) + if err != nil { + return nil, err + } + + if err := ret.CheckSignatureFrom(ret.Certificate); err != nil { + return nil, ParseError("bad OCSP signature") + } + + if issuer != nil { + if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil { + return nil, ParseError("bad signature on embedded certificate") + } + } + } else if issuer != nil { + if err := ret.CheckSignatureFrom(issuer); err != nil { + return nil, ParseError("bad OCSP signature") + } + } + + r := basicResp.TBSResponseData.Responses[0] + + for _, ext := range r.SingleExtensions { + if ext.Critical { + return nil, ParseError("unsupported critical extension") + } + } + ret.Extensions = r.SingleExtensions + + ret.SerialNumber = r.CertID.SerialNumber + + switch { + case bool(r.Good): + ret.Status = Good + case bool(r.Unknown): + ret.Status = Unknown + default: + ret.Status = Revoked + ret.RevokedAt = r.Revoked.RevocationTime + ret.RevocationReason = int(r.Revoked.Reason) + } + + ret.ProducedAt = basicResp.TBSResponseData.ProducedAt + ret.ThisUpdate = r.ThisUpdate + ret.NextUpdate = r.NextUpdate + + return ret, nil +} + +// RequestOptions contains options for constructing OCSP requests. +type RequestOptions struct { + // Hash contains the hash function that should be used when + // constructing the OCSP request. If zero, SHA-1 will be used. + Hash crypto.Hash +} + +func (opts *RequestOptions) hash() crypto.Hash { + if opts == nil || opts.Hash == 0 { + // SHA-1 is nearly universally used in OCSP. + return crypto.SHA1 + } + return opts.Hash +} + +// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If +// opts is nil then sensible defaults are used. +func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) { + hashFunc := opts.hash() + + // OCSP seems to be the only place where these raw hash identifiers are + // used. 
I took the following from + // http://msdn.microsoft.com/en-us/library/ff635603.aspx + var hashOID asn1.ObjectIdentifier + hashOID, ok := hashOIDs[hashFunc] + if !ok { + return nil, x509.ErrUnsupportedAlgorithm + } + + if !hashFunc.Available() { + return nil, x509.ErrUnsupportedAlgorithm + } + h := opts.hash().New() + + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + return asn1.Marshal(ocspRequest{ + tbsRequest{ + Version: 0, + RequestList: []request{ + { + Cert: certID{ + pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + issuerNameHash, + issuerKeyHash, + cert.SerialNumber, + }, + }, + }, + }, + }) +} + +// CreateResponse returns a DER-encoded OCSP response with the specified contents. +// The fields in the response are populated as follows: +// +// The responder cert is used to populate the ResponderName field, and the certificate +// itself is provided alongside the OCSP response signature. +// +// The issuer cert is used to puplate the IssuerNameHash and IssuerKeyHash fields. +// (SHA-1 is used for the hash function; this is not configurable.) +// +// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt, +// RevocationReason, ThisUpdate, and NextUpdate fields. +// +// The ProducedAt date is automatically set to the current date, to the nearest minute. +func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + h := sha1.New() + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + innerResponse := singleResponse{ + CertID: certID{ + HashAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: hashOIDs[crypto.SHA1], + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + NameHash: issuerNameHash, + IssuerKeyHash: issuerKeyHash, + SerialNumber: template.SerialNumber, + }, + ThisUpdate: template.ThisUpdate.UTC(), + NextUpdate: template.NextUpdate.UTC(), + SingleExtensions: template.ExtraExtensions, + } + + switch template.Status { + case Good: + innerResponse.Good = true + case Unknown: + innerResponse.Unknown = true + case Revoked: + innerResponse.Revoked = revokedInfo{ + RevocationTime: template.RevokedAt.UTC(), + Reason: asn1.Enumerated(template.RevocationReason), + } + } + + responderName := asn1.RawValue{ + Class: 2, // context-specific + Tag: 1, // explicit tag + IsCompound: true, + Bytes: responderCert.RawSubject, + } + tbsResponseData := responseData{ + Version: 0, + RawResponderName: responderName, + ProducedAt: time.Now().Truncate(time.Minute).UTC(), + Responses: []singleResponse{innerResponse}, + } + + tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) + if err != nil { + return nil, err + } + + hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + responseHash := hashFunc.New() + 
responseHash.Write(tbsResponseDataDER) + signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc) + if err != nil { + return nil, err + } + + response := basicResponse{ + TBSResponseData: tbsResponseData, + SignatureAlgorithm: signatureAlgorithm, + Signature: asn1.BitString{ + Bytes: signature, + BitLength: 8 * len(signature), + }, + } + if template.Certificate != nil { + response.Certificates = []asn1.RawValue{ + asn1.RawValue{FullBytes: template.Certificate.Raw}, + } + } + responseDER, err := asn1.Marshal(response) + if err != nil { + return nil, err + } + + return asn1.Marshal(responseASN1{ + Status: asn1.Enumerated(Success), + Response: responseBytes{ + ResponseType: idPKIXOCSPBasic, + Response: responseDER, + }, + }) +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 000000000000..3daa8979e1d7 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 (Internationalized Domain Names for +// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and +// RFC 5894. +package idna // import "golang.org/x/net/idna" + +import ( + "strings" + "unicode/utf8" +) + +// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or +// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". +func ToASCII(s string) (string, error) { + if ascii(s) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if !ascii(label) { + a, err := encode(acePrefix, label) + if err != nil { + return "", err + } + labels[i] = a + } + } + return strings.Join(labels, "."), nil +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". +func ToUnicode(s string) (string, error) { + if !strings.Contains(s, acePrefix) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if strings.HasPrefix(label, acePrefix) { + u, err := decode(label[len(acePrefix):]) + if err != nil { + return "", err + } + labels[i] = u + } + } + return strings.Join(labels, "."), nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 000000000000..92e733f6a7b1 --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,200 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. 
+const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +// decode decodes a string as specified in section 6.2. +func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
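// For example (consistent with the ToASCII/ToUnicode documentation above):
//
//	a, _ := encode("xn--", "bücher") // a == "xn--bcher-kva"
//	u, _ := decode("bcher-kva")      // u == "bücher"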
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. +func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 000000000000..add1c2e42ea9 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,713 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat +// +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 9 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. 
+ childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +const ( + defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" + gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" +) + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + + // validSuffixRE is used to check that the entries in the public suffix + // list are in canonical form (after Punycode encoding). Specifically, + // capital letters are not allowed. + validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) + dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) + + comments = flag.Bool("comments", false, "generate table.go comments, for debugging") + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + if *url != defaultURL { + return fmt.Errorf("-version was not specified, and the -url is not the default one") + } + sha, date, err := gitCommit() + if err != nil { + return err + } + *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann = false + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffixRE.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): 
+ case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". + default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + if err := generate(printReal, &root, "table.go"); err != nil { + return err + } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} + +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. 
+const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<= 1< 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + if *comments { + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } else { + fmt.Fprintf(w, "0x%08x,\n", c) + } + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
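// For example (a small illustration of the overlap merging performed below):
//
//	combined := crush([]string{"abc", "cde"}) // "abcde": the labels overlap on "c",
//	                                          // and both remain substrings of the result.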
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 000000000000..8bbf3bcd7efd --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,135 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// http://publicsuffix.org/. A public suffix is one under which Internet users +// can directly register names. +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. 
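+
+// As a rough usage sketch (illustrative only, not part of the upstream file),
+// the two entry points behave as follows for a name on the ICANN portion of
+// the list:
+//
+//	suffix, icann := publicsuffix.PublicSuffix("www.example.co.uk")
+//	// suffix == "co.uk", icann == true
+//
+//	etld1, err := publicsuffix.EffectiveTLDPlusOne("www.example.co.uk")
+//	// etld1 == "example.co.uk", err == nil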
+ +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is privately +// managed. For example, foo.org and foo.co.uk are ICANN domains, +// foo.dyndns.org and foo.blogspot.co.uk are private domains. +// +// Use cases for distinguishing ICANN domains like foo.com from private +// domains like foo.appspot.com can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, wildcard := domain, len(domain), false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + icann = u&(1<>= nodesBitsICANN + u = children[u&(1<>= childrenBitsLo + hi = u & (1<>= childrenBitsHi + switch u & (1<>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1<. + diff --git a/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md new file mode 100644 index 000000000000..61b183651c0d --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md @@ -0,0 +1,14 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +[Individual Contributor License Agreement][1]. + + [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/gopkg.in/square/go-jose.v1/LICENSE b/vendor/gopkg.in/square/go-jose.v1/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/gopkg.in/square/go-jose.v1/README.md new file mode 100644 index 000000000000..fd859da7a919 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/README.md @@ -0,0 +1,209 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-red.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) [![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. For the moment, it mainly focuses on encryption +and signing based on the JSON Web Encryption and JSON Web Signature standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) +standard (RFC 7516) and +[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) +standard (RFC 7515). Tables of supported algorithms are shown below. +The library supports both the compact and full serialization formats, and has +optional support for multiple recipients. It also comes with a small +command-line utility +([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. If you do not like this behavior, you can use the +`std_json` build tag to disable it (though we do not recommend doing so). + +### Versions + +We use [gopkg.in](https://gopkg.in) for versioning. + +[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version: + + import "gopkg.in/square/go-jose.v1" + +The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain +backwards compatible. We're currently sketching out ideas for a new version, to +clean up the interface a bit. If you have ideas or feature requests [please let +us know](https://github.com/square/go-jose/issues/64)! + +### Supported algorithms + +See below for a table of supported algorithms. Algorithm identifiers match +the names in the +[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +standard where possible. The +[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants) +has a list of constants. 
+ + Key encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSA-PKCS#1v1.5 | RSA1_5 + RSA-OAEP | RSA-OAEP, RSA-OAEP-256 + AES key wrap | A128KW, A192KW, A256KW + AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW + ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW + ECDH-ES (direct) | ECDH-ES1 + Direct encryption | dir1 + +1. Not supported in multi-recipient mode + + Signing / MAC | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 + RSASSA-PSS | PS256, PS384, PS512 + HMAC | HS256, HS384, HS512 + ECDSA | ES256, ES384, ES512 + + Content encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 + AES-GCM | A128GCM, A192GCM, A256GCM + + Compression | Algorithm identifiers(s) + :------------------------- | ------------------------------- + DEFLATE (RFC 1951) | DEF + +### Supported key types + +See below for a table of supported key types. These are understood by the +library, and can be passed to corresponding functions such as `NewEncrypter` or +`NewSigner`. Note that if you are creating a new encrypter or signer with a +JsonWebKey, the key id of the JsonWebKey (if present) will be added to any +resulting messages. + + Algorithm(s) | Corresponding types + :------------------------- | ------------------------------- + RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) + ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) + AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) + +## Examples + +Encryption/decryption example using RSA: + +```Go +// Generate a public/private key pair to use for this example. The library +// also provides two utility functions (LoadPublicKey and LoadPrivateKey) +// that can be used to load keys from PEM/DER-encoded data. +privateKey, err := rsa.GenerateKey(rand.Reader, 2048) +if err != nil { + panic(err) +} + +// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would +// indicate that the selected algorithm(s) are not currently supported. +publicKey := &privateKey.PublicKey +encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) +if err != nil { + panic(err) +} + +// Encrypt a sample plaintext. Calling the encrypter returns an encrypted +// JWE object, which can then be serialized for output afterwards. An error +// would indicate a problem in an underlying cryptographic primitive. +var plaintext = []byte("Lorem ipsum dolor sit amet") +object, err := encrypter.Encrypt(plaintext) +if err != nil { + panic(err) +} + +// Serialize the encrypted object using the full serialization format. +// Alternatively you can also use the compact format here by calling +// object.CompactSerialize() instead. +serialized := object.FullSerialize() + +// Parse the serialized, encrypted JWE object. An error would indicate that +// the given input did not represent a valid message. +object, err = ParseEncrypted(serialized) +if err != nil { + panic(err) +} + +// Now we can decrypt and get back our original plaintext. 
An error here +// would indicate the the message failed to decrypt, e.g. because the auth +// tag was broken or the message was tampered with. +decrypted, err := object.Decrypt(privateKey) +if err != nil { + panic(err) +} + +fmt.Printf(string(decrypted)) +// output: Lorem ipsum dolor sit amet +``` + +Signing/verification example using RSA: + +```Go +// Generate a public/private key pair to use for this example. The library +// also provides two utility functions (LoadPublicKey and LoadPrivateKey) +// that can be used to load keys from PEM/DER-encoded data. +privateKey, err := rsa.GenerateKey(rand.Reader, 2048) +if err != nil { + panic(err) +} + +// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. +signer, err := NewSigner(PS512, privateKey) +if err != nil { + panic(err) +} + +// Sign a sample payload. Calling the signer returns a protected JWS object, +// which can then be serialized for output afterwards. An error would +// indicate a problem in an underlying cryptographic primitive. +var payload = []byte("Lorem ipsum dolor sit amet") +object, err := signer.Sign(payload) +if err != nil { + panic(err) +} + +// Serialize the encrypted object using the full serialization format. +// Alternatively you can also use the compact format here by calling +// object.CompactSerialize() instead. +serialized := object.FullSerialize() + +// Parse the serialized, protected JWS object. An error would indicate that +// the given input did not represent a valid message. +object, err = ParseSigned(serialized) +if err != nil { + panic(err) +} + +// Now we can verify the signature on the payload. An error here would +// indicate the the message failed to verify, e.g. because the signature was +// broken or the message was tampered with. +output, err := object.Verify(&privateKey.PublicKey) +if err != nil { + panic(err) +} + +fmt.Printf(string(output)) +// output: Lorem ipsum dolor sit amet +``` + +More examples can be found in the [Godoc +reference](https://godoc.org/github.com/square/go-jose) for this package. The +[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util) +subdirectory also contains a small command-line utility which might +be useful as an example. diff --git a/vendor/gopkg.in/square/go-jose.v1/asymmetric.go b/vendor/gopkg.in/square/go-jose.v1/asymmetric.go new file mode 100644 index 000000000000..381156caba7e --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/asymmetric.go @@ -0,0 +1,498 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + "gopkg.in/square/go-jose.v1/cipher" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: &JsonWebKey{ + Key: &privateKey.PublicKey, + }, + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. +func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &ecEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newECDSASigner creates a recipientSigInfo based on the given key. +func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case ES256, ES384, ES512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: &JsonWebKey{ + Key: &privateKey.PublicKey, + }, + signer: &ecDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// Encrypt the given payload and update the object. +func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + encryptedKey, err := ctx.encrypt(cek, alg) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: encryptedKey, + header: &rawHeader{}, + }, nil +} + +// Encrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). 
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { + switch alg { + case RSA1_5: + return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek) + case RSA_OAEP: + return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{}) + case RSA_OAEP_256: + return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Decrypt the given payload and return the content encryption key. +func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator) +} + +// Decrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { + // Note: The random reader on decrypt operations is only used for blinding, + // so stubbing is meanlingless (hence the direct use of rand.Reader). + switch alg { + case RSA1_5: + defer func() { + // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload + // because of an index out of bounds error, which we want to ignore. + // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() + // only exists for preventing crashes with unpatched versions. + // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k + // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 + _ = recover() + }() + + // Perform some input validation. + keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberatly ignoring errors here. 
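+ // DecryptPKCS1v15SessionKey only overwrites cek when the padding checks out;
+ // on failure the random cek generated above is used instead, so an attacker
+ // cannot distinguish valid from invalid padding by observing our behavior.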
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
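+ // The CEK itself (and the "epk" header) is produced later by
+ // ecKeyGenerator.genKey, which NewEncrypter wires up for ECDH-ES, so there
+ // is nothing to record per recipient here.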
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + headers := rawHeader{ + Epk: &JsonWebKey{ + Key: &priv.PublicKey, + }, + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + if headers.Epk == nil { + return nil, errors.New("square/go-jose: missing epk header") + } + + publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("square/go-jose: invalid epk header") + } + + apuData := headers.Apu.bytes() + apvData := headers.Apv.bytes() + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size) + } + + var keySize int + + switch KeyAlgorithm(headers.Alg) { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
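+ // The derived key is keyed on the content encryption algorithm ("enc") and
+ // sized for the content cipher, mirroring what ecKeyGenerator.genKey did on
+ // the encryption side.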
+ return deriveKey(string(headers.Enc), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(headers.Alg, keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + } + + if len(signature) != 2*keySize { + return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("square/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go new file mode 100644 index 000000000000..a5c358340ffd --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. + ciphertext := make([]byte, len(plaintext)+ctx.Overhead())[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, len(dst)+len(ciphertext)+len(authtag)) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("square/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, []byte(ciphertext[:offset])...) 
+ + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, len(dst)+len(plaintext)) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, len(aad)+len(nonce)+len(ciphertext)+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad)*8)) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures the the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. +func resize(in []byte, n int) (head, tail []byte) { + if cap(in) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, len(buffer)+missing) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("square/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("square/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("square/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go new file mode 100644 index 000000000000..cbb5f7b88e2c --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. 
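+// Each block of output is Hash(counter || Z || otherInfo), where the counter
+// is a big-endian uint32 starting at 1 (the Concatenation KDF from NIST
+// SP 800-56A).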
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, len(algID)+len(ptyUInfo)+len(ptyVInfo)+len(supPubInfo)+len(supPrivInfo)) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go new file mode 100644 index 000000000000..c6a5a82153de --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go @@ -0,0 +1,51 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "crypto/ecdsa" + "encoding/binary" +) + +// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. +func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { + // algId, partyUInfo, partyVInfo inputs must be prefixed with the length + algID := lengthPrefixed([]byte(alg)) + ptyUInfo := lengthPrefixed(apuData) + ptyVInfo := lengthPrefixed(apvData) + + // suppPubInfo is the encoded length of the output size in bits + supPubInfo := make([]byte, 4) + binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) + + z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) + + key := make([]byte, size) + + // Read on the KDF will never fail + _, _ = reader.Read(key) + return key +} + +func lengthPrefixed(data []byte) []byte { + out := make([]byte, len(data)+4) + binary.BigEndian.PutUint32(out, uint32(len(data))) + copy(out[4:], data) + return out +} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go new file mode 100644 index 000000000000..1d36d5015108 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go @@ -0,0 +1,109 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("square/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/gopkg.in/square/go-jose.v1/crypter.go b/vendor/gopkg.in/square/go-jose.v1/crypter.go new file mode 100644 index 000000000000..f61af2c0efaa --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/crypter.go @@ -0,0 +1,349 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "fmt" + "reflect" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. 
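+// Instances are created with NewEncrypter (single recipient) or with
+// NewMultiEncrypter followed by AddRecipient; see the README for a worked
+// RSA-OAEP/A128GCM example.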
+type Encrypter interface { + Encrypt(plaintext []byte) (*JsonWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) + SetCompression(alg CompressionAlgorithm) +} + +// MultiEncrypter represents an encrypter which supports multiple recipients. +type MultiEncrypter interface { + Encrypt(plaintext []byte) (*JsonWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) + SetCompression(alg CompressionAlgorithm) + AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// SetCompression sets a compression algorithm to be applied before encryption. +func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) { + ctx.compressionAlg = compressionAlg +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + compressionAlg: NONE, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := encryptionKey.(type) { + case *JsonWebKey: + keyID = encryptionKey.KeyID + rawKey = encryptionKey.Key + default: + rawKey = encryptionKey + } + + switch alg { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte)) + if keyID != "" { + recipient.keyID = keyID + } + encrypter.recipients = []recipientKeyInfo{recipient} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey)) + if keyID != "" { + recipient.keyID = keyID + } + encrypter.recipients = []recipientKeyInfo{recipient} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: 
encrypter.cipher.keySize(), + } + err := encrypter.AddRecipient(alg, encryptionKey) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + compressionAlg: NONE, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) { + var recipient recipientKeyInfo + + switch alg { + case DIRECT, ECDH_ES: + return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg) + } + + recipient, err = makeJWERecipient(alg, encryptionKey) + + if err == nil { + ctx.recipients = append(ctx.recipients, recipient) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case *JsonWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + if err == nil && encryptionKey.KeyID != "" { + recipient.keyID = encryptionKey.KeyID + } + return recipient, err + default: + return recipientKeyInfo{}, ErrUnsupportedKeyType + } +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case *JsonWebKey: + return newDecrypter(decryptionKey.Key) + default: + return nil, ErrUnsupportedKeyType + } +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) { + obj := &JsonWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{ + Enc: ctx.contentAlg, + } + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + recipient.header.Alg = string(info.keyAlg) + if info.keyID != "" { + recipient.header.Kid = info.keyID + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
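+		// (The JWE compact serialization has no per-recipient header section, so a
+		// single recipient's parameters can safely live in the protected header.)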
+		obj.protected.merge(obj.recipients[0].header)
+		obj.recipients[0].header = nil
+	}
+
+	if ctx.compressionAlg != NONE {
+		plaintext, err = compress(ctx.compressionAlg, plaintext)
+		if err != nil {
+			return nil, err
+		}
+
+		obj.protected.Zip = ctx.compressionAlg
+	}
+
+	authData := obj.computeAuthData()
+	parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+	if err != nil {
+		return nil, err
+	}
+
+	obj.iv = parts.iv
+	obj.ciphertext = parts.ciphertext
+	obj.tag = parts.tag
+
+	return obj, nil
+}
+
+// Decrypt and validate the object and return the plaintext.
+func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+	headers := obj.mergedHeaders(nil)
+
+	if len(headers.Crit) > 0 {
+		return nil, fmt.Errorf("square/go-jose: unsupported crit header")
+	}
+
+	decrypter, err := newDecrypter(decryptionKey)
+	if err != nil {
+		return nil, err
+	}
+
+	cipher := getContentCipher(headers.Enc)
+	if cipher == nil {
+		return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc))
+	}
+
+	generator := randomKeyGenerator{
+		size: cipher.keySize(),
+	}
+
+	parts := &aeadParts{
+		iv:         obj.iv,
+		ciphertext: obj.ciphertext,
+		tag:        obj.tag,
+	}
+
+	authData := obj.computeAuthData()
+
+	var plaintext []byte
+	for _, recipient := range obj.recipients {
+		recipientHeaders := obj.mergedHeaders(&recipient)
+
+		cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+		if err == nil {
+			// Found a valid CEK -- let's try to decrypt.
+			plaintext, err = cipher.decrypt(cek, authData, parts)
+			if err == nil {
+				break
+			}
+		}
+	}
+
+	if plaintext == nil {
+		return nil, ErrCryptoFailure
+	}
+
+	// The "zip" header parameter may only be present in the protected header.
+	if obj.protected.Zip != "" {
+		plaintext, err = decompress(obj.protected.Zip, plaintext)
+	}
+
+	return plaintext, err
+}
diff --git a/vendor/gopkg.in/square/go-jose.v1/doc.go b/vendor/gopkg.in/square/go-jose.v1/doc.go
new file mode 100644
index 000000000000..b4cd1e989b3e
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/doc.go
@@ -0,0 +1,26 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+
+Package jose aims to provide an implementation of the JavaScript Object Signing
+and Encryption set of standards. For the moment, it mainly focuses on
+encryption and signing based on the JSON Web Encryption and JSON Web Signature
+standards. The library supports both the compact and full serialization
+formats, and has optional support for multiple recipients.
+
+*/
+package jose // import "gopkg.in/square/go-jose.v1"
diff --git a/vendor/gopkg.in/square/go-jose.v1/encoding.go b/vendor/gopkg.in/square/go-jose.v1/encoding.go
new file mode 100644
index 000000000000..3e2ac0ae9d18
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/encoding.go
@@ -0,0 +1,191 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "regexp" + "strings" +) + +var stripWhitespaceRegex = regexp.MustCompile("\\s") + +// Url-safe base64 encode that strips padding +func base64URLEncode(data []byte) string { + var result = base64.URLEncoding.EncodeToString(data) + return strings.TrimRight(result, "=") +} + +// Url-safe base64 decoder that adds padding +func base64URLDecode(data string) ([]byte, error) { + var missing = (4 - len(data)%4) % 4 + data += strings.Repeat("=", missing) + return base64.URLEncoding.DecodeString(data) +} + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := MarshalJSON(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/square/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + return stripWhitespaceRegex.ReplaceAllString(data, "") +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
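+// JOSE uses unpadded base64url, so padding is stripped on encode and re-added on decode.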
+type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return MarshalJSON(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := UnmarshalJSON(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64URLDecode(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64URLEncode(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} diff --git a/vendor/gopkg.in/square/go-jose.v1/json/LICENSE b/vendor/gopkg.in/square/go-jose.v1/json/LICENSE new file mode 100644 index 000000000000..74487567632c --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/gopkg.in/square/go-jose.v1/json/README.md new file mode 100644 index 000000000000..86de5e5581f5 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. 
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+  [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+  This is to avoid differences in the interpretation of JOSE messages between
+  go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+  input whenever we detect a duplicate. Rather than trying to work with malformed
+  data, we prefer to reject it right away.
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/decode.go b/vendor/gopkg.in/square/go-jose.v1/json/decode.go
new file mode 100644
index 000000000000..37457e5a8347
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/decode.go
@@ -0,0 +1,1183 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// requiring an exact, case-sensitive match; unlike the standard library, this
+// fork does not fall back to case-insensitive matching (see the package README).
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	string, for JSON strings
+//	[]interface{}, for JSON arrays
+//	map[string]interface{}, for JSON objects
+//	nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
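+//
+// Unlike the standard library, this fork rejects JSON objects that contain
+// duplicate member names; Unmarshal returns an error instead of silently
+// keeping the last value.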
+// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. 
+func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + useNumber bool +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. 
+// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. 
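+	// indirect follows pointers in v, allocating them as needed, and reports an
+	// Unmarshaler or encoding.TextUnmarshaler if it finds one along the way.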
+ u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. 
+ start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. 
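+	// A zero-length item can only arrive via the ",string" struct tag path, so
+	// treat it as a misuse of that tag.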
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. 
+ d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/gopkg.in/square/go-jose.v1/json/encode.go b/vendor/gopkg.in/square/go-jose.v1/json/encode.go new file mode 100644 index 000000000000..1dae8bb7cd80 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. 
The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. 
+// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML